"""
Multi-Layer Perceptron with Custom Initialization
"""

import torch
import torch.nn as nn


class SinActFunc(torch.autograd.Function):
    """Custom autograd sine activation: forward is sin(x), backward is grad_out * cos(x)."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.sin()

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return grad_out * x.cos()


class Sine(nn.Module):
    """Module wrapper around SinActFunc so it can be used inside nn.Sequential."""

    def forward(self, x):
        return SinActFunc.apply(x)
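

# Illustrative check (not part of the original module): torch.autograd.gradcheck can confirm
# that SinActFunc's manual backward matches the analytic derivative cos(x). The helper name
# `_gradcheck_sine` is an assumption introduced here only for demonstration.
def _gradcheck_sine():
    x = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)
    return torch.autograd.gradcheck(SinActFunc.apply, (x,))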


ACTIVATION_REGISTRY = {
    'relu': lambda: nn.ReLU(),
    'tanh': lambda: nn.Tanh(),
    'sin': lambda: Sine(),
}


def get_activation(name: str):
    key = name.lower()
    if key not in ACTIVATION_REGISTRY:
        raise ValueError(f"Unknown activation '{name}'. Available: {list(ACTIVATION_REGISTRY.keys())}")
    return ACTIVATION_REGISTRY[key]()
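

# Example usage (sketch): activations are looked up by lower-cased name; unknown names raise
# a ValueError listing the available keys.
#   act = get_activation('sin')            # -> Sine() instance
#   y = act(torch.linspace(-1.0, 1.0, 5))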


def act_prime(mod, a):
    """
    Unified activation derivative for the manual Jacobian in MLPWithCustomInit.forward_Jac.

    Args:
        mod: the activation module whose derivative is required.
        a: pre-activation tensor (the input that was fed to the activation).
    """
    if isinstance(mod, nn.ReLU):
        return (a > 0).to(a.dtype)
    if isinstance(mod, nn.LeakyReLU):
        ns = getattr(mod, "negative_slope", 0.01)
        return torch.where(a > 0, torch.ones_like(a), torch.full_like(a, ns))
    if isinstance(mod, nn.Tanh):
        ta = torch.tanh(a)
        return 1.0 - ta * ta
    if isinstance(mod, nn.Sigmoid):
        s = torch.sigmoid(a)
        return s * (1.0 - s)
    if isinstance(mod, nn.Softplus):
        beta = getattr(mod, "beta", 1.0)
        return torch.sigmoid(beta * a)
    if isinstance(mod, nn.SiLU):
        s = torch.sigmoid(a)
        return s * (1.0 + a * (1.0 - s))
    if isinstance(mod, nn.ELU):
        alpha = getattr(mod, "alpha", 1.0)
        return torch.where(a > 0, torch.ones_like(a), alpha * torch.exp(a))
    if isinstance(mod, nn.Identity):
        return torch.ones_like(a)
    if isinstance(mod, Sine):
        return torch.cos(a)
    raise NotImplementedError(f"Jacobian for activation {mod.__class__.__name__} not implemented.")
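

# Illustrative cross-check (the helper name `_check_act_prime` is an assumption introduced
# here only for demonstration): compare act_prime against autograd for one activation module.
def _check_act_prime(mod=None):
    if mod is None:
        mod = nn.Tanh()
    a = torch.randn(6, requires_grad=True)
    (grad,) = torch.autograd.grad(mod(a).sum(), a)
    return torch.allclose(grad, act_prime(mod, a.detach()), atol=1e-6)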


class MLPWithCustomInit(nn.Module):

    def __init__(self, input_dim, hidden_dims, output_dim, activation=nn.ReLU, init_type='kaiming', activation_per_layer=None, dim_in_linear=[0, 0], dim_out_linear=0, hidden_dim_linear=[]):
        super().__init__()
        layers = []

        self.dim_in_linear = dim_in_linear
        self.dim_out_linear = dim_out_linear
        self.hidden_dim_linear = hidden_dim_linear

        if not isinstance(dim_in_linear, (list, tuple)) or len(dim_in_linear) != 2:
            raise ValueError(f"dim_in_linear must be a list/tuple of length 2 [start_idx, end_idx], got {dim_in_linear}")

        start_idx, end_idx = dim_in_linear
        linear_input_size = end_idx - start_idx

        if linear_input_size > 0 and dim_out_linear > 0:
            if start_idx < 0 or end_idx > input_dim or start_idx >= end_idx:
                raise ValueError(f"Invalid dim_in_linear {dim_in_linear}: must satisfy 0 <= start < end <= {input_dim}")
            if dim_out_linear > output_dim:
                raise ValueError(f"dim_out_linear ({dim_out_linear}) cannot be larger than output_dim ({output_dim})")

            if len(hidden_dim_linear) == 0:
                # Purely linear branch acting on the input slice [start_idx:end_idx].
                self.linear_branch = nn.Linear(linear_input_size, dim_out_linear)
                branch_type = 'linear_branch'
            else:
                # Small MLP branch acting on the input slice [start_idx:end_idx].
                branch_layers = []
                branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]

                for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
                    branch_layers.append(nn.Linear(in_dim, out_dim))
                    if i < len(branch_dims) - 2:
                        branch_layers.append(activation())

                self.linear_branch = nn.Sequential(*branch_layers)
                branch_type = 'mlp_branch'

            mlp_output_dim = output_dim - dim_out_linear
            total_output_dim = output_dim
        else:
            self.linear_branch = None
            mlp_output_dim = output_dim
            total_output_dim = output_dim
            linear_input_size = 0
            branch_type = None

        dims = [input_dim] + hidden_dims

        self.layer_info = {
            'input_dim': input_dim,
            'hidden_dims': hidden_dims,
            'output_dim': output_dim,
            'mlp_output_dim': mlp_output_dim,
            'total_output_dim': total_output_dim,
            'dim_in_linear': dim_in_linear,
            'linear_input_size': linear_input_size,
            'dim_out_linear': dim_out_linear,
            'hidden_dim_linear': hidden_dim_linear,
            'branch_type': branch_type,
            'total_layers': len(hidden_dims) + 1,
            'layer_details': []
        }

        if self.linear_branch is not None:
            if branch_type == 'linear_branch':
                self.layer_info['layer_details'].append({
                    'layer_idx': -1,
                    'type': 'linear_branch',
                    'input_dim': linear_input_size,
                    'input_slice': f"[{start_idx}:{end_idx}]",
                    'output_dim': dim_out_linear,
                    'activation': 'None',
                    'parameters': linear_input_size * dim_out_linear + dim_out_linear
                })
            else:
                branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]
                total_branch_params = 0
                for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
                    layer_params = in_dim * out_dim + out_dim
                    total_branch_params += layer_params

                    if i < len(branch_dims) - 2:
                        self.layer_info['layer_details'].append({
                            'layer_idx': f"b{i}",
                            'type': 'branch_hidden',
                            'input_dim': in_dim,
                            'output_dim': out_dim,
                            'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 else None,
                            'activation': activation().__class__.__name__,
                            'parameters': layer_params
                        })
                    else:
                        self.layer_info['layer_details'].append({
                            'layer_idx': f"b{i}",
                            'type': 'branch_output',
                            'input_dim': in_dim,
                            'output_dim': out_dim,
                            'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 and len(branch_dims) == 2 else None,
                            'activation': 'None',
                            'parameters': layer_params
                        })

        if activation_per_layer is None:
            activation_fns = [activation() for _ in hidden_dims]
        else:
            if len(activation_per_layer) != len(hidden_dims):
                raise ValueError(f"Number of activations ({len(activation_per_layer)}) must match number of hidden layers ({len(hidden_dims)})")
            activation_fns = [get_activation(a) for a in activation_per_layer]

        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            layer_linear = nn.Linear(in_dim, out_dim)
            activation_fn = activation_fns[i]
            layers.append(layer_linear)
            layers.append(activation_fn)

            self.layer_info['layer_details'].append({
                'layer_idx': i,
                'type': 'hidden',
                'input_dim': in_dim,
                'output_dim': out_dim,
                'activation': activation_fn.__class__.__name__,
                'parameters': in_dim * out_dim + out_dim
            })

        output_layer = nn.Linear(dims[-1], mlp_output_dim)
        layers.append(output_layer)

        self.layer_info['layer_details'].append({
            'layer_idx': len(hidden_dims),
            'type': 'output',
            'input_dim': dims[-1],
            'output_dim': mlp_output_dim,
            'activation': 'None',
            'parameters': dims[-1] * mlp_output_dim + mlp_output_dim
        })

        self.layer_info['total_parameters'] = sum(detail['parameters'] for detail in self.layer_info['layer_details'])

        self.net = nn.Sequential(*layers)
        self.activation = activation
        self.init_type = init_type
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            if self.init_type == 'xavier':
                act = self.activation()
                if isinstance(act, (nn.Tanh, nn.ReLU)):
                    gain = nn.init.calculate_gain('tanh' if isinstance(act, nn.Tanh) else 'relu')
                    nn.init.xavier_uniform_(module.weight, gain=gain)
                else:
                    nn.init.xavier_uniform_(module.weight)
            elif self.init_type == 'kaiming':
                nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
            elif self.init_type == 'orthogonal':
                nn.init.orthogonal_(module.weight, gain=1.0)
            elif self.init_type == 'small_normal':
                nn.init.normal_(module.weight, mean=0.0, std=0.01)
            # Biases are zeroed regardless of the chosen init_type.
            if module.bias is not None:
                nn.init.zeros_(module.bias)
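
    # Example construction (illustrative): init_type may be 'kaiming' (default), 'xavier',
    # 'orthogonal', or 'small_normal'; any other value leaves the weights at PyTorch's
    # default nn.Linear initialization, while biases are always zeroed.
    #   net = MLPWithCustomInit(3, [32, 32], 2, activation=nn.Tanh, init_type='xavier')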

    def _apply_network(self, x):
        """Apply the complete network, concatenating the linear branch output if present."""
        if self.linear_branch is not None:
            # Feed only the configured input slice to the branch, and the full input to the MLP.
            start_idx, end_idx = self.dim_in_linear
            x1 = x[..., start_idx:end_idx]
            linear_out = self.linear_branch(x1)
            mlp_out = self.net(x)
            return torch.cat([linear_out, mlp_out], dim=-1)
        else:
            return self.net(x)
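
    # Output layout when the branch is active (illustrative shapes, assuming
    # dim_in_linear=[1, 4], dim_out_linear=3 and output_dim=7):
    #   x                        : [B, input_dim]
    #   x[..., 1:4]              : [B, 3] -> linear_branch -> [B, 3]
    #   self.net(x)              : [B, 4] (output_dim - dim_out_linear)
    #   cat([branch, mlp], -1)   : [B, 7] (branch outputs come first)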

    def forward(self, t=None, x=None, u=None):
        """
        Forward pass that handles both (t, x) and single-argument calls, plus an optional
        external input u.

        IMPORTANT: this must stay compatible with:
        1. Hybrid models: encoder([], x_flat) and decoder([], z_flat)
        2. Vector fields in torchdiffeq: vector_field(t, z) or vector_field(t, z, u)
        3. Autoregressive models: ag_function(None, ag_input)
        4. External input conditioning: vector_field(t, z, u)
        """
        if x is None and u is None:
            # Single-argument call: the first positional argument is the network input.
            return self._apply_network(t)
        elif u is None:
            # (t, x) call: t is ignored, x is the network input.
            return self._apply_network(x)
        else:
            # (t, x, u) call: a scalar u is expanded to [B, 1] before concatenation.
            if isinstance(u, (int, float)):
                u_expanded = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            else:
                u_expanded = u
            xu_input = torch.cat([x, u_expanded], dim=-1)
            return self._apply_network(xu_input)
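
    # Calling-pattern sketch (illustrative; the tensor that reaches the network must always
    # match input_dim, so a model used with an external input u needs input_dim = state_dim + u_dim):
    #   vf(z)          # single argument: z itself is the network input
    #   vf(t, z)       # ODE-style: t is ignored, z is the network input
    #   vf(t, z, 0.5)  # scalar u is expanded to [B, 1] and concatenated onto z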

    def forward_Jac(self, t=None, x=None, u=None):
        """
        Manual forward pass that also accumulates the Jacobian of the output with respect
        to the network input, with branch support.

        Returns:
            y: [B, total_out_dim] where total_out_dim = dim_out_linear + mlp_output_dim
            J: [B, total_out_dim, in_dim_eff], where in_dim_eff is the size of the input
               actually fed to the network (state concatenated with u, if u is given)

        The Jacobian accounts for the branch structure: output = [branch(x1); MLP(x)].
        """
        # Resolve the same calling conventions as forward().
        if x is None and u is None:
            inp = t
        elif u is None:
            inp = x
        else:
            if isinstance(u, (int, float)):
                u = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            inp = torch.cat([x, u], dim=-1)

        B, Din = inp.shape

        if self.linear_branch is not None:
            start_idx, end_idx = self.dim_in_linear
            x1 = inp[..., start_idx:end_idx]

            if len(self.hidden_dim_linear) == 0:
                # Purely linear branch: its Jacobian is the weight matrix, placed in the
                # columns that correspond to the input slice.
                branch_out = self.linear_branch(x1)
                W_branch = self.linear_branch.weight
                J_branch = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch[:, :, start_idx:end_idx] = W_branch.unsqueeze(0).expand(B, -1, -1)
            else:
                # MLP branch: accumulate the chain-rule product layer by layer.
                y_branch = x1
                J_branch = None
                is_first_branch = True
                a_cache_branch = None

                for i, module in enumerate(self.linear_branch):
                    if isinstance(module, nn.Linear):
                        W = module.weight
                        b = module.bias
                        a = y_branch @ W.t() + b

                        Wb = W.unsqueeze(0).expand(B, -1, -1)
                        if is_first_branch:
                            J_branch = Wb
                            is_first_branch = False
                        else:
                            J_branch = torch.bmm(Wb, J_branch)

                        y_branch = a
                        a_cache_branch = a
                    else:
                        # Activation layer: scale each Jacobian row by the activation derivative.
                        ap = act_prime(module, a_cache_branch)
                        y_branch = module(y_branch)
                        J_branch = ap.unsqueeze(-1) * J_branch

                branch_out = y_branch

                # Embed the branch Jacobian into the full input dimension.
                J_branch_full = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch_full[:, :, start_idx:end_idx] = J_branch
                J_branch = J_branch_full

            # Main MLP trunk on the full input.
            y_mlp = inp
            J_mlp = None
            is_first = True
            a_cache = None

            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight
                    b = module.bias
                    a = y_mlp @ W.t() + b

                    Wb = W.unsqueeze(0).expand(B, -1, -1)
                    if is_first:
                        J_mlp = Wb
                        is_first = False
                    else:
                        J_mlp = torch.bmm(Wb, J_mlp)

                    y_mlp = a
                    a_cache = a
                else:
                    ap = act_prime(module, a_cache)
                    y_mlp = module(y_mlp)

                    if J_mlp is None:
                        I = torch.eye(Din, device=y_mlp.device, dtype=y_mlp.dtype)
                        J_mlp = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False

                    J_mlp = ap.unsqueeze(-1) * J_mlp

            # Branch outputs first, then the MLP outputs (same order as _apply_network).
            y = torch.cat([branch_out, y_mlp], dim=-1)
            J = torch.cat([J_branch, J_mlp], dim=1)

        else:
            # No branch: plain chain-rule accumulation over self.net.
            y = inp
            J = None
            is_first = True
            a_cache = None

            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight
                    b = module.bias
                    a = y @ W.t() + b

                    Wb = W.unsqueeze(0).expand(B, -1, -1)
                    if is_first:
                        J = Wb
                        is_first = False
                    else:
                        J = torch.bmm(Wb, J)

                    y = a
                    a_cache = a
                else:
                    ap = act_prime(module, a_cache)
                    y = module(y)

                    if J is None:
                        I = torch.eye(Din, device=y.device, dtype=y.dtype)
                        J = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False

                    J = ap.unsqueeze(-1) * J

        return y, J
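
    # The manual Jacobian above is the chain-rule product over the Sequential stack: with
    # pre-activations a_k = W_k y_{k-1} + b_k and y_k = sigma(a_k),
    #   J = W_out @ D_L @ W_L @ ... @ D_1 @ W_1,   where D_k = diag(act_prime(a_k)),
    # and the branch rows are embedded in the columns selected by dim_in_linear.
    # Usage sketch (illustrative): y, J = model.forward_Jac(x) with x of shape [B, Din]
    # gives J of shape [B, total_output_dim, Din].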

    def __call__(self, *args):
        """
        Allow flexible positional calling - CRITICAL for compatibility with the calling
        patterns listed in forward(). Note that overriding __call__ bypasses nn.Module
        hooks; dispatch goes straight to _apply_network.
        """
        if len(args) == 1:
            # (x,)
            return self._apply_network(args[0])
        elif len(args) == 2:
            # (t, x): t is ignored.
            return self._apply_network(args[1])
        elif len(args) == 3:
            # (t, x, u): t is ignored, u is concatenated onto x.
            x, u = args[1], args[2]
            if isinstance(u, (int, float)):
                u_expanded = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            else:
                u_expanded = u
            xu_input = torch.cat([x, u_expanded], dim=-1)
            return self._apply_network(xu_input)
        else:
            raise ValueError(f"Expected 1, 2, or 3 arguments, got {len(args)}")

    def get_layer_info(self):
        """Return detailed layer information."""
        return self.layer_info

    def print_architecture(self, name="MLP"):
        """Print a summary of the network architecture."""
        print(f"\n{name} Architecture:")
        print(f" Input dimension: {self.layer_info['input_dim']}")
        if self.linear_branch is not None:
            start_idx, end_idx = self.dim_in_linear
            if self.layer_info['branch_type'] == 'linear_branch':
                print(f" Linear branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['dim_out_linear']}")
            else:
                print(f" MLP branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['hidden_dim_linear']} -> {self.layer_info['dim_out_linear']}")
        print(f" Total layers: {self.layer_info['total_layers']}")
        print(f" MLP output dimension: {self.layer_info['mlp_output_dim']}")
        print(f" Total output dimension: {self.layer_info['total_output_dim']}")
        print(f" Total parameters: {self.layer_info['total_parameters']}")
        print(" Layer details:")
        for detail in self.layer_info['layer_details']:
            if 'branch' in detail['type']:
                layer_type = detail['type'].replace('_', ' ').title()
                slice_info = f" {detail['input_slice']}" if detail.get('input_slice') else ""
                print(f" {layer_type} {detail['layer_idx']}: "
                      f"{detail['input_dim']}{slice_info} -> {detail['output_dim']}, "
                      f"activation: {detail['activation']}, "
                      f"params: {detail['parameters']}")
            else:
                print(f" Layer {detail['layer_idx']} ({detail['type']}): "
                      f"{detail['input_dim']} -> {detail['output_dim']}, "
                      f"activation: {detail['activation']}, "
                      f"params: {detail['parameters']}")


if __name__ == "__main__":
    import time

    def _batch_jacobian_autograd(model, x, *, mode="x"):
        """
        Compute the Jacobian sample-by-sample using torch.autograd.functional.jacobian.

        Returns: J_auto [B, out_dim, in_dim]
        mode:
        - "x": calls model.forward(inp) where inp = x
        - "tx": calls model.forward(t=None, x=inp)
        """
        x = x.detach()
        B, Din = x.shape

        if mode == "x":
            def f_single(x_single):
                return model.forward(x_single.unsqueeze(0)).squeeze(0)
        elif mode == "tx":
            def f_single(x_single):
                return model.forward(None, x_single.unsqueeze(0)).squeeze(0)
        else:
            raise NotImplementedError

        J_list = []
        for b in range(B):
            xb = x[b].clone().requires_grad_(True)
            Jb = torch.autograd.functional.jacobian(
                f_single, xb, create_graph=False, vectorize=True
            )
            J_list.append(Jb)
        return torch.stack(J_list, dim=0)

    @torch.no_grad()
    def check_manual_jacobian(model, x, *, atol=1e-6, rtol=1e-5, mode="x", verbose=True):
        """
        Compare the manual Jacobian (forward_Jac) against the autograd Jacobian.

        Args:
            model: MLP instance providing .forward and .forward_Jac
            x: [B, in_dim] input
            atol, rtol: tolerances for allclose
            mode: see _batch_jacobian_autograd; default assumes forward(x)

        Prints the max abs difference and raises AssertionError on mismatch.
        """
        start_manual = time.time()
        y_man, J_man = model.forward_Jac(x) if mode == "x" else model.forward_Jac(None, x)
        end_manual = time.time()
        print(f"Manual Jacobian time: {end_manual - start_manual:.4f} seconds")

        # Warm-up pass so the timed autograd run below is not skewed by one-time setup costs.
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)

        start_auto = time.time()
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)
        end_auto = time.time()
        print(f"Autograd Jacobian time: {end_auto - start_auto:.4f} seconds")

        if J_auto.shape != J_man.shape:
            raise RuntimeError(f"Shape mismatch: auto {J_auto.shape} vs manual {J_man.shape}")

        max_abs = (J_auto - J_man).abs().max().item()
        ok = torch.allclose(J_auto, J_man, atol=atol, rtol=rtol)

        if verbose:
            print(f"[Jacobian check] shape={tuple(J_man.shape)}, max|Δ|={max_abs:.3e}, "
                  f"allclose(atol={atol}, rtol={rtol})={ok}")

        assert ok, f"Jacobian mismatch: max|Δ|={max_abs:.3e} exceeds tolerances."

    torch.manual_seed(0)
    # Run on GPU when available so the demo also works on CPU-only machines.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[8, 6]).to(device)
    x = torch.randn(512, 6).to(device)
    _ = model(x)
    print(f"Input shape: {x.shape}")
    print(f"Output shape: {model(x).shape}")
    model.print_architecture("MLP with MLP Branch (slice [1:4])")
    check_manual_jacobian(model, x, mode="x")

    model_linear = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[]).to(device)
    model_linear.print_architecture("MLP with Linear Branch")
    check_manual_jacobian(model_linear, x, mode="x")

    model_orig = MLPWithCustomInit(6, [64] * 3, 4).to(device)
    _ = model_orig(x)
    print(f"Original output shape: {model_orig(x).shape}")
    check_manual_jacobian(model_orig, x, mode="x")

    model_legacy = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[0, 2], dim_out_linear=3).to(device)
    print(f"dim_in_linear={model_legacy.dim_in_linear} (slice of the first two inputs)")
    model_legacy.print_architecture("MLP with Linear Branch (slice [0:2])")