# NOTE: removed non-Python extraction artifacts (file-size/hash header and line-number dump).
"""
Multi-Layer Perceptron with Custom Initialization
"""
import torch
import torch.nn as nn
# --- Added activation registry with custom sine (supports autograd) ---
class SinActFunc(torch.autograd.Function):
    """Custom autograd op for the sine activation.

    Forward computes sin(x); backward multiplies the incoming gradient by
    cos(x), the analytic derivative.
    """

    @staticmethod
    def forward(ctx, x):
        # Keep the input around: the backward pass needs cos(x).
        ctx.save_for_backward(x)
        return torch.sin(x)

    @staticmethod
    def backward(ctx, grad_out):
        (saved_x,) = ctx.saved_tensors
        # d/dx sin(x) = cos(x)
        return grad_out * torch.cos(saved_x)
class Sine(nn.Module):
    """Elementwise sine activation module.

    Routes the computation through SinActFunc so the derivative (cos) comes
    from the custom autograd Function rather than the built-in sin node.
    """

    def forward(self, x):
        out = SinActFunc.apply(x)
        return out
# Maps lower-case activation names to zero-argument factories, each producing
# a fresh module instance per call.  'sin' stays a lambda so the Sine class is
# looked up lazily, only when actually requested.
ACTIVATION_REGISTRY = {
    'relu': lambda: nn.ReLU(),
    'tanh': lambda: nn.Tanh(),
    'sin': lambda: Sine(),
}


def get_activation(name: str):
    """Return a new activation module for `name` (case-insensitive lookup).

    Raises:
        ValueError: if `name` is not a key of ACTIVATION_REGISTRY.
    """
    factory = ACTIVATION_REGISTRY.get(name.lower())
    if factory is None:
        raise ValueError(f"Unknown activation '{name}'. Available: {list(ACTIVATION_REGISTRY.keys())}")
    return factory()
def act_prime(mod, a):
    """
    Unified activation derivative for manual Jacobian.
    a: pre-activation tensor.

    Returns a tensor with the same shape as `a` holding the elementwise
    derivative of activation module `mod` evaluated at `a`.  The isinstance
    chain is checked in a fixed order; unknown activations raise.
    """
    if isinstance(mod, nn.ReLU):
        # Subgradient 0 at a == 0 (matches (a > 0) cast).
        return a.gt(0).to(a.dtype)
    if isinstance(mod, nn.LeakyReLU):
        slope = getattr(mod, "negative_slope", 0.01)
        return torch.where(a > 0, torch.ones_like(a), torch.full_like(a, slope))
    if isinstance(mod, nn.Tanh):
        th = torch.tanh(a)
        return 1.0 - th * th
    if isinstance(mod, nn.Sigmoid):
        sg = torch.sigmoid(a)
        return sg * (1.0 - sg)
    if isinstance(mod, nn.Softplus):
        # d/dx softplus(x; beta) = sigmoid(beta * x)
        return torch.sigmoid(getattr(mod, "beta", 1.0) * a)
    if isinstance(mod, nn.SiLU):  # Swish: d/dx x*sigmoid(x)
        sg = torch.sigmoid(a)
        return sg * (1.0 + a * (1.0 - sg))
    if isinstance(mod, nn.ELU):
        return torch.where(a > 0, torch.ones_like(a),
                           getattr(mod, "alpha", 1.0) * torch.exp(a))
    if isinstance(mod, nn.Identity):
        return torch.ones_like(a)
    if isinstance(mod, Sine):
        return torch.cos(a)
    raise NotImplementedError(f"Jacobian for activation {mod.__class__.__name__} not implemented.")
class MLPWithCustomInit(nn.Module):
    """Fully-connected MLP with custom weight init and an optional parallel branch.

    When the branch is enabled (``dim_in_linear`` non-empty and
    ``dim_out_linear > 0``) it sees only the input slice ``x[..., start:end]``
    and its output is concatenated in front of the main MLP's output:

        y = [branch(x[..., start:end]) ; mlp(x)]        # last-dim concat

    so the main MLP produces ``output_dim - dim_out_linear`` features and the
    total output width stays ``output_dim``.

    Args:
        input_dim: size of the last input dimension.
        hidden_dims: list of hidden-layer widths for the main MLP.
        output_dim: total output width (branch + main MLP).
        activation: activation *class* (not an instance), e.g. ``nn.ReLU``.
        init_type: 'kaiming' | 'xavier' | 'orthogonal' | 'small_normal'.
            Any other value leaves PyTorch's default Linear init in place.
        activation_per_layer: optional list of activation names (resolved via
            ``get_activation``), one per hidden layer; overrides ``activation``
            for the main MLP.
        dim_in_linear: ``[start, end)`` input slice fed to the branch.  A bare
            int ``n`` is accepted as legacy shorthand for ``[0, n]``.
        dim_out_linear: branch output width; 0 disables the branch.
        hidden_dim_linear: hidden widths of the branch; empty list means the
            branch is a single ``nn.Linear``.
    """

    def __init__(self, input_dim, hidden_dims, output_dim, activation=nn.ReLU,
                 init_type='kaiming', activation_per_layer=None,
                 dim_in_linear=None, dim_out_linear=0, hidden_dim_linear=None):
        super().__init__()
        # None sentinels avoid the shared-mutable-default pitfall; the
        # effective defaults are unchanged ([0, 0] and []).
        if dim_in_linear is None:
            dim_in_linear = [0, 0]
        if hidden_dim_linear is None:
            hidden_dim_linear = []
        # Legacy shorthand: dim_in_linear=n means "the first n features".
        if isinstance(dim_in_linear, int):
            dim_in_linear = [0, dim_in_linear]
        layers = []
        # Store linear branch parameters
        self.dim_in_linear = dim_in_linear
        self.dim_out_linear = dim_out_linear
        self.hidden_dim_linear = hidden_dim_linear
        # Validate dim_in_linear format
        if not isinstance(dim_in_linear, (list, tuple)) or len(dim_in_linear) != 2:
            raise ValueError(f"dim_in_linear must be a list/tuple of length 2 [start_idx, end_idx], got {dim_in_linear}")
        start_idx, end_idx = dim_in_linear
        linear_input_size = end_idx - start_idx
        # Create linear/MLP branch if specified
        if linear_input_size > 0 and dim_out_linear > 0:
            if start_idx < 0 or end_idx > input_dim or start_idx >= end_idx:
                raise ValueError(f"Invalid dim_in_linear {dim_in_linear}: must satisfy 0 <= start < end <= {input_dim}")
            if dim_out_linear > output_dim:
                raise ValueError(f"dim_out_linear ({dim_out_linear}) cannot be larger than output_dim ({output_dim})")
            # Create branch network (linear or MLP)
            if len(hidden_dim_linear) == 0:
                # Simple linear branch
                self.linear_branch = nn.Linear(linear_input_size, dim_out_linear)
                branch_type = 'linear_branch'
            else:
                # MLP branch: Linear(+activation) stack; no activation after
                # the final layer.
                branch_layers = []
                branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]
                for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
                    branch_layers.append(nn.Linear(in_dim, out_dim))
                    if i < len(branch_dims) - 2:
                        branch_layers.append(activation())
                self.linear_branch = nn.Sequential(*branch_layers)
                branch_type = 'mlp_branch'
            # MLP output dimension is total minus branch output
            mlp_output_dim = output_dim - dim_out_linear
            total_output_dim = output_dim
        else:
            self.linear_branch = None
            mlp_output_dim = output_dim
            total_output_dim = output_dim
            linear_input_size = 0
            branch_type = None
        dims = [input_dim] + hidden_dims
        # Record layer information for debugging/analysis.
        # NOTE: 'total_layers' counts only the main MLP (hidden + output),
        # not the branch layers.
        self.layer_info = {
            'input_dim': input_dim,
            'hidden_dims': hidden_dims,
            'output_dim': output_dim,
            'mlp_output_dim': mlp_output_dim,
            'total_output_dim': total_output_dim,
            'dim_in_linear': dim_in_linear,
            'linear_input_size': linear_input_size,
            'dim_out_linear': dim_out_linear,
            'hidden_dim_linear': hidden_dim_linear,
            'branch_type': branch_type,
            'total_layers': len(hidden_dims) + 1,
            'layer_details': []
        }
        # Add branch info if exists
        if self.linear_branch is not None:
            if branch_type == 'linear_branch':
                # Simple linear branch
                self.layer_info['layer_details'].append({
                    'layer_idx': -1,  # Special index for branch
                    'type': 'linear_branch',
                    'input_dim': linear_input_size,
                    'input_slice': f"[{start_idx}:{end_idx}]",
                    'output_dim': dim_out_linear,
                    'activation': 'None',
                    'parameters': linear_input_size * dim_out_linear + dim_out_linear
                })
            else:
                # MLP branch - record each layer
                branch_dims = [linear_input_size] + hidden_dim_linear + [dim_out_linear]
                total_branch_params = 0
                for i, (in_dim, out_dim) in enumerate(zip(branch_dims[:-1], branch_dims[1:])):
                    layer_params = in_dim * out_dim + out_dim  # weights + biases
                    total_branch_params += layer_params
                    if i < len(branch_dims) - 2:
                        # Hidden layer in branch
                        self.layer_info['layer_details'].append({
                            'layer_idx': f"b{i}",  # Branch layer index
                            'type': 'branch_hidden',
                            'input_dim': in_dim,
                            'output_dim': out_dim,
                            'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 else None,
                            'activation': activation().__class__.__name__,
                            'parameters': layer_params
                        })
                    else:
                        # Output layer in branch
                        self.layer_info['layer_details'].append({
                            'layer_idx': f"b{i}",
                            'type': 'branch_output',
                            'input_dim': in_dim,
                            'output_dim': out_dim,
                            'input_slice': f"[{start_idx}:{end_idx}]" if i == 0 and len(branch_dims) == 2 else None,
                            'activation': 'None',
                            'parameters': layer_params
                        })
        # Handle activation per layer
        if activation_per_layer is None:
            activation_fns = [activation() for _ in hidden_dims]
        else:
            if len(activation_per_layer) != len(hidden_dims):
                raise ValueError(f"Number of activations ({len(activation_per_layer)}) must match number of hidden layers ({len(hidden_dims)})")
            activation_fns = [get_activation(a) for a in activation_per_layer]
        for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):
            layer_linear = nn.Linear(in_dim, out_dim)
            activation_fn = activation_fns[i]
            layers.append(layer_linear)
            layers.append(activation_fn)
            # Record layer details
            self.layer_info['layer_details'].append({
                'layer_idx': i,
                'type': 'hidden',
                'input_dim': in_dim,
                'output_dim': out_dim,
                'activation': activation_fn.__class__.__name__,
                'parameters': in_dim * out_dim + out_dim
            })
        # Output layer - use mlp_output_dim instead of output_dim
        output_layer = nn.Linear(dims[-1], mlp_output_dim)
        layers.append(output_layer)
        # Record output layer details
        self.layer_info['layer_details'].append({
            'layer_idx': len(hidden_dims),
            'type': 'output',
            'input_dim': dims[-1],
            'output_dim': mlp_output_dim,
            'activation': 'None',
            'parameters': dims[-1] * mlp_output_dim + mlp_output_dim
        })
        # Calculate total parameters
        self.layer_info['total_parameters'] = sum(detail['parameters'] for detail in self.layer_info['layer_details'])
        self.net = nn.Sequential(*layers)
        # self.activation / self.init_type must be set before apply(), which
        # reads them in _init_weights.
        self.activation = activation
        self.init_type = init_type
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize a Linear layer's weights per ``self.init_type``; biases -> 0.

        Non-Linear modules and unknown init_type values are left untouched.
        """
        if not isinstance(module, nn.Linear):
            return
        if self.init_type == 'xavier':
            # Instantiate the activation once (the original code built it
            # twice) just to inspect its type for the gain.
            act = self.activation()
            if isinstance(act, (nn.Tanh, nn.ReLU)):
                gain = nn.init.calculate_gain('tanh' if isinstance(act, nn.Tanh) else 'relu')
                nn.init.xavier_uniform_(module.weight, gain=gain)
            else:
                nn.init.xavier_uniform_(module.weight)
        elif self.init_type == 'kaiming':
            nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
        elif self.init_type == 'orthogonal':
            nn.init.orthogonal_(module.weight, gain=1.0)
        elif self.init_type == 'small_normal':
            nn.init.normal_(module.weight, mean=0.0, std=0.01)
        if module.bias is not None:
            nn.init.zeros_(module.bias)

    def _apply_network(self, x):
        """Apply the complete network with linear branch if present.

        Returns ``[branch(x_slice); mlp(x)]`` (last-dim concat) when the
        branch exists, otherwise just ``mlp(x)``.
        """
        if self.linear_branch is not None:
            # Extract slice from input for linear branch
            start_idx, end_idx = self.dim_in_linear
            x1 = x[..., start_idx:end_idx]
            # Apply linear branch
            linear_out = self.linear_branch(x1)
            # Apply MLP to full input
            mlp_out = self.net(x)
            # Concatenate outputs: [linear_out; mlp_out]
            return torch.cat([linear_out, mlp_out], dim=-1)
        else:
            # No linear branch, just apply MLP
            return self.net(x)

    def forward(self, t=None, x=None, u=None):
        """
        Forward pass - handle both (t, x) and single argument calls, plus external input u
        IMPORTANT: This must maintain compatibility with:
        1. Hybrid models: encoder([], x_flat) and decoder([], z_flat)
        2. Vector fields in torchdiffeq: vector_field(t, z) or vector_field(t, z, u)
        3. Autoregressive models: ag_function(None, ag_input)
        4. NEW: External input conditioning: vector_field(t, z, u)
        """
        if x is None and u is None:
            # Single argument call: forward(input)
            # This handles autoregressive case: ag_function(ag_input)
            return self._apply_network(t)  # t is actually the input
        elif u is None:
            # Two argument call: forward(t, x)
            # This handles hybrid case: encoder([], x_flat) where t=[] and x=x_flat
            # And vector field case: vector_field(t, z) where we use z
            return self._apply_network(x)
        else:
            # Three argument call: forward(t, x, u) - NEW for external conditioning
            # Concatenate x and u for input to network
            if isinstance(u, (int, float)):
                # Scalar u, expand to match batch size (assumes x is [B, D] — 2D)
                u_expanded = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            else:
                u_expanded = u
            xu_input = torch.cat([x, u_expanded], dim=-1)
            return self._apply_network(xu_input)

    def forward_Jac(self, t=None, x=None, u=None):
        """
        Manual forward + Jacobian for the MLP with branch support.
        Returns:
            y: [B, total_out_dim] where total_out_dim = dim_out_linear + mlp_output_dim
            J: [B, total_out_dim, in_dim_eff], where in_dim_eff is the actual input fed to the network
        The Jacobian accounts for the branch structure: output = [branch(x1); MLP(x)]

        Activation derivatives come from ``act_prime``; the chain rule is
        accumulated with batched matmuls (torch.bmm) through each Linear.
        """
        # --- 1) Parse inputs to match the forward() conventions ---
        if x is None and u is None:
            inp = t
        elif u is None:
            inp = x
        else:
            if isinstance(u, (int, float)):
                u = torch.full((x.shape[0], 1), u, device=x.device, dtype=x.dtype)
            inp = torch.cat([x, u], dim=-1)
        B, Din = inp.shape  # assumes a 2D [batch, features] input
        if self.linear_branch is not None:
            # Extract slice from input for branch
            start_idx, end_idx = self.dim_in_linear
            x1 = inp[..., start_idx:end_idx]
            # Apply branch network
            if len(self.hidden_dim_linear) == 0:
                # Simple linear branch: y_branch = A @ x1 + b, so the Jacobian
                # w.r.t. the sliced columns is just the weight matrix.
                branch_out = self.linear_branch(x1)  # [B, dim_out_linear]
                W_branch = self.linear_branch.weight  # [dim_out_linear, linear_input_size]
                J_branch = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch[:, :, start_idx:end_idx] = W_branch.unsqueeze(0).expand(B, -1, -1)
            else:
                # MLP branch - need to compute Jacobian through the branch network
                y_branch = x1
                J_branch = None
                is_first_branch = True
                a_cache_branch = None
                # Propagate through branch layers
                for i, module in enumerate(self.linear_branch):
                    if isinstance(module, nn.Linear):
                        W = module.weight  # [out, in]
                        b = module.bias  # [out]
                        a = y_branch @ W.t() + b  # pre-activation
                        Wb = W.unsqueeze(0).expand(B, -1, -1)  # [B, out, in]
                        if is_first_branch:
                            J_branch = Wb  # [B, out, linear_input_size]
                            is_first_branch = False
                        else:
                            J_branch = torch.bmm(Wb, J_branch)  # [B, out, linear_input_size]
                        y_branch = a
                        a_cache_branch = a
                    else:
                        # Activation function in branch: scale rows of J by
                        # the activation derivative at the pre-activation.
                        ap = act_prime(module, a_cache_branch)  # [B, dim]
                        y_branch = module(y_branch)
                        J_branch = ap.unsqueeze(-1) * J_branch
                branch_out = y_branch
                # Embed the branch Jacobian into the full-input columns.
                J_branch_full = torch.zeros(B, self.dim_out_linear, Din, device=inp.device, dtype=inp.dtype)
                J_branch_full[:, :, start_idx:end_idx] = J_branch
                J_branch = J_branch_full
            # Main MLP: apply full network to full input
            y_mlp = inp
            J_mlp = None
            is_first = True
            a_cache = None
            # Propagate through MLP layers
            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight  # [out, in]
                    b = module.bias  # [out]
                    a = y_mlp @ W.t() + b  # pre-activation
                    Wb = W.unsqueeze(0).expand(B, -1, -1)  # [B, out, in]
                    if is_first:
                        J_mlp = Wb  # [B, out, Din]
                        is_first = False
                    else:
                        J_mlp = torch.bmm(Wb, J_mlp)  # [B, out, Din]
                    y_mlp = a
                    a_cache = a
                else:
                    # Activation function
                    ap = act_prime(module, a_cache)  # [B, dim]
                    y_mlp = module(y_mlp)
                    if J_mlp is None:
                        # Activation before any Linear: start from identity.
                        I = torch.eye(Din, device=y_mlp.device, dtype=y_mlp.dtype)
                        J_mlp = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False
                    J_mlp = ap.unsqueeze(-1) * J_mlp
            # Combine outputs and Jacobians
            y = torch.cat([branch_out, y_mlp], dim=-1)  # [B, total_out_dim]
            J = torch.cat([J_branch, J_mlp], dim=1)  # [B, total_out_dim, Din]
        else:
            # No linear branch, use original implementation
            y = inp
            J = None
            is_first = True
            a_cache = None
            for module in self.net:
                if isinstance(module, nn.Linear):
                    W = module.weight
                    b = module.bias
                    a = y @ W.t() + b
                    Wb = W.unsqueeze(0).expand(B, -1, -1)
                    if is_first:
                        J = Wb
                        is_first = False
                    else:
                        J = torch.bmm(Wb, J)
                    y = a
                    a_cache = a
                else:
                    ap = act_prime(module, a_cache)
                    y = module(y)
                    if J is None:
                        I = torch.eye(Din, device=y.device, dtype=y.dtype)
                        J = I.unsqueeze(0).expand(B, Din, Din).clone()
                        is_first = False
                    J = ap.unsqueeze(-1) * J
        return y, J

    def __call__(self, *args):
        """
        Allow flexible calling - CRITICAL for compatibility
        This ensures all calling patterns work correctly.

        NOTE: overriding __call__ bypasses nn.Module._call_impl, so forward
        hooks do not run.  The 1/2/3-positional-arg dispatch is identical to
        forward(), so we delegate instead of duplicating it.
        """
        if not 1 <= len(args) <= 3:
            raise ValueError(f"Expected 1, 2, or 3 arguments, got {len(args)}")
        return self.forward(*args)

    def get_layer_info(self):
        """Return detailed layer information"""
        return self.layer_info

    def print_architecture(self, name="MLP"):
        """Print a summary of the network architecture"""
        print(f"\n{name} Architecture:")
        print(f"  Input dimension: {self.layer_info['input_dim']}")
        if self.linear_branch is not None:
            start_idx, end_idx = self.dim_in_linear
            if self.layer_info['branch_type'] == 'linear_branch':
                print(f"  Linear branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['dim_out_linear']}")
            else:
                print(f"  MLP branch: input[{start_idx}:{end_idx}] ({self.layer_info['linear_input_size']}) -> {self.layer_info['hidden_dim_linear']} -> {self.layer_info['dim_out_linear']}")
        print(f"  Total layers: {self.layer_info['total_layers']}")
        print(f"  MLP output dimension: {self.layer_info['mlp_output_dim']}")
        print(f"  Total output dimension: {self.layer_info['total_output_dim']}")
        print(f"  Total parameters: {self.layer_info['total_parameters']}")
        print("  Layer details:")
        for detail in self.layer_info['layer_details']:
            if 'branch' in detail['type']:
                layer_type = detail['type'].replace('_', ' ').title()
                slice_info = f" {detail['input_slice']}" if detail.get('input_slice') else ""
                print(f"    {layer_type} {detail['layer_idx']}: "
                      f"{detail['input_dim']}{slice_info} -> {detail['output_dim']}, "
                      f"activation: {detail['activation']}, "
                      f"params: {detail['parameters']}")
            else:
                print(f"    Layer {detail['layer_idx']} ({detail['type']}): "
                      f"{detail['input_dim']} -> {detail['output_dim']}, "
                      f"activation: {detail['activation']}, "
                      f"params: {detail['parameters']}")
# --- Example usage (adapt constructor to your class) ---
if __name__ == "__main__":
    import time

    def _batch_jacobian_autograd(model, x, *, mode="x"):
        """
        Compute Jacobian batch-wise using torch.autograd.functional.jacobian
        Returns: J_auto [B, out_dim, in_dim]
        mode:
        - "x": calls model.forward(inp) where inp=x
        - "tx": calls model.forward(t=None, x=inp)
        """
        x = x.detach()
        B, Din = x.shape
        if mode == "x":
            def f_single(x_single):  # x_single: [Din]
                return model.forward(x_single.unsqueeze(0)).squeeze(0)  # [Dout]
        elif mode == "tx":
            def f_single(x_single):
                return model.forward(None, x_single.unsqueeze(0)).squeeze(0)
        else:
            raise NotImplementedError
        J_list = []
        for b in range(B):
            xb = x[b].clone().requires_grad_(True)
            Jb = torch.autograd.functional.jacobian(
                f_single, xb, create_graph=False, vectorize=True
            )  # [Dout, Din]
            J_list.append(Jb)
        return torch.stack(J_list, dim=0)  # [B, Dout, Din]

    @torch.no_grad()
    def check_manual_jacobian(model, x, *, atol=1e-6, rtol=1e-5, mode="x", verbose=True):
        """
        Compare manual Jacobian (forward_Jac) vs. autograd Jacobian.
        Args:
            model: your MLP instance (with .forward and .forward_Jac)
            x: [B, in_dim] input
            atol, rtol: tolerances for allclose
            mode: see _batch_jacobian_autograd; default assumes forward(x)
        Prints max abs diff and raises AssertionError if mismatch.
        NOTE(review): runs under @torch.no_grad(); this relies on
        torch.autograd.functional.jacobian re-enabling grad mode internally —
        confirm on the targeted torch version.
        """
        start_manual = time.time()
        y_man, J_man = model.forward_Jac(x) if mode == "x" else model.forward_Jac(None, x)
        end_manual = time.time()
        print(f"Manual Jacobian time: {end_manual - start_manual:.4f} seconds")
        # Untimed warm-up call so the timed run below excludes one-off setup costs.
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)
        start_auto = time.time()
        J_auto = _batch_jacobian_autograd(model, x, mode=mode)
        end_auto = time.time()
        print(f"Autograd Jacobian time: {end_auto - start_auto:.4f} seconds")
        if J_auto.shape != J_man.shape:
            raise RuntimeError(f"Shape mismatch: auto {J_auto.shape} vs manual {J_man.shape}")
        max_abs = (J_auto - J_man).abs().max().item()
        ok = torch.allclose(J_auto, J_man, atol=atol, rtol=rtol)
        if verbose:
            print(f"[Jacobian check] shape={tuple(J_man.shape)}, max|Δ|={max_abs:.3e}, "
                  f"allclose(atol={atol}, rtol={rtol})={ok}")
        assert ok, f"Jacobian mismatch: max|Δ|={max_abs:.3e} exceeds tolerances."

    torch.manual_seed(0)
    # Fall back to CPU when CUDA is unavailable instead of crashing on .to('cuda').
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Test with MLP branch using slice [1, 4] - total output dim is 7 (3 from branch + 4 from MLP)
    model = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[8, 6]).to(device)
    x = torch.randn(512, 6).to(device)
    _ = model(x)  # warmup
    print(f"Input shape: {x.shape}")
    print(f"Output shape: {model(x).shape}")
    model.print_architecture("MLP with MLP Branch (slice [1:4])")
    check_manual_jacobian(model, x, mode="x")
    # Test with linear branch (empty hidden_dim_linear)
    model_linear = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[1, 4], dim_out_linear=3, hidden_dim_linear=[]).to(device)
    model_linear.print_architecture("MLP with Linear Branch")
    check_manual_jacobian(model_linear, x, mode="x")
    # Test without branch (original behavior)
    model_orig = MLPWithCustomInit(6, [64] * 3, 4).to(device)
    _ = model_orig(x)
    print(f"Original output shape: {model_orig(x).shape}")
    check_manual_jacobian(model_orig, x, mode="x")
    # Test a branch anchored at index 0 (the slice a legacy int shorthand
    # dim_in_linear=2 would denote).
    model_legacy = MLPWithCustomInit(6, [64] * 3, 7, dim_in_linear=[0, 2], dim_out_linear=3).to(device)
    print(f"Legacy format dim_in_linear=2 -> {model_legacy.dim_in_linear}")
    model_legacy.print_architecture("MLP with Linear Branch (legacy format)")