AbstractPhil committed
Commit 549de85 · verified · 1 Parent(s): e3576d7

Create pytorch_compiled_kernel.py

Files changed (1): pytorch_compiled_kernel.py (+1049, -0)
pytorch_compiled_kernel.py ADDED
@@ -0,0 +1,1049 @@
"""
CompiledEigh: torch.compile(fullgraph=True) drop-in for torch.linalg.eigh.

Eliminates all graph breaks, device-host syncs, and dynamic allocation.
Output contract matches torch.linalg.eigh:
    eigenvalues:  [*, n] real, ascending
    eigenvectors: [*, n, n] orthonormal columns

Author: AbstractPhil / GeoLIP project
"""

import math
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional


# =============================================================================
# Constants
# =============================================================================

DEFAULT_MAX_NEWTON: int = 8
DEFAULT_MAX_JACOBI_SWEEPS: int = 10  # 10 sweeps saturates for n <= 16
JACOBI_THRESHOLD: int = 16


# =============================================================================
# Atom: 2x2 Symmetric Eigenproblem
# =============================================================================

def eigh_2x2(a: Tensor, b: Tensor, c: Tensor, eps: float = 1e-30
             ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """
    Closed-form eigendecomposition of batched 2x2 symmetric matrices
    [[a, b], [b, c]].
    Returns: (lambda1, lambda2, cos_theta, sin_theta), lambda1 <= lambda2.
    """
    trace = a + c
    diff = a - c
    two_b = 2.0 * b
    hyp = torch.sqrt(diff * diff + two_b * two_b + eps)

    lambda1 = 0.5 * (trace - hyp)
    lambda2 = 0.5 * (trace + hyp)

    # Eigenvector for lambda2 is (b, lambda2 - a) up to scale, so the
    # lambda2 column is (sin_theta, cos_theta).
    vx = b
    vy = lambda2 - a
    norm_v = torch.sqrt(vx * vx + vy * vy + eps)
    cos_theta = vy / norm_v
    sin_theta = vx / norm_v

    return lambda1, lambda2, cos_theta, sin_theta


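# Illustrative check (hypothetical helper, not called anywhere in this file):
# for [[0, 1], [1, 0]] the eigenvalues are -1 and +1, and the lambda2
# eigenvector (sin_theta, cos_theta) should be (1, 1)/sqrt(2).
def _demo_eigh_2x2() -> None:
    a, b, c = torch.tensor([0.0]), torch.tensor([1.0]), torch.tensor([0.0])
    l1, l2, ct, st = eigh_2x2(a, b, c)
    print(l1.item(), l2.item())  # approx -1.0, 1.0
    print(st.item(), ct.item())  # approx 0.7071, 0.7071

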
# =============================================================================
# Utility: Newton-Schulz Orthogonalization (all-bmm, GPU-native)
# =============================================================================

def orthogonalize_ns(V: Tensor, n_iter: int = 2) -> Tensor:
    """
    Re-orthogonalize columns of V via Newton-Schulz iteration.

    Computes V @ (V^T V)^{-1/2} using the coupled iteration:
        X_0 = I,  Y_0 = V^T V
        X_{k+1} = 0.5  * X_k @ (3I - Y_k)
        Y_{k+1} = 0.25 * (3I - Y_k) @ Y_k @ (3I - Y_k)

    X converges to (V^T V)^{-1/2}; Y_k tracks the invariant
    X_k^T (V^T V) X_k and converges to I. Quadratically convergent
    when V^T V is close to I.

    Convergence from ||V^T V - I|| = eps:
        1 iteration:  error -> O(eps^2), ~1e-6  from 1e-3
        2 iterations: error -> O(eps^4), ~1e-12 from 1e-3

    All ops are bmm -- fully compiled, no sequential column processing.

    V: [B, n, n] (square, columns are approximate eigenvectors)
    Returns: [B, n, n] with orthonormal columns
    """
    B, n, m = V.shape
    I_n = torch.eye(m, device=V.device, dtype=V.dtype).unsqueeze(0).expand(B, -1, -1)

    # Y = V^T V ~ I
    Y = torch.bmm(V.transpose(-2, -1), V)
    X = I_n.clone()

    for _ in range(n_iter):
        T = 3.0 * I_n - Y                          # (3I - Y_k)
        X = 0.5 * torch.bmm(X, T)                  # X_{k+1}
        Y = 0.25 * torch.bmm(torch.bmm(T, Y), T)   # Y_{k+1} = X_{k+1}^T Y_0 X_{k+1}

    return torch.bmm(V, X)


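# Scalar sketch of the Y-iteration above (hypothetical helper, for intuition
# only): one step maps 1 + e to 1 - 0.75*e^2 + O(e^3), which is the quadratic
# rate quoted in the docstring.
def _demo_ns_scalar() -> None:
    y = 1.0 + 1e-3
    for k in range(3):
        y = 0.25 * (3.0 - y) ** 2 * y
        print(k, abs(y - 1.0))  # roughly 7.5e-07, 4.2e-13, then ~0

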
# =============================================================================
# Phase 1: Householder Tridiagonalization
# =============================================================================

class HouseholderTridiagonalizer(nn.Module):
    """
    Reduces batched symmetric A to tridiagonal T = Q^T A Q via
    Householder reflections. Fixed loop bounds, compilable.

    Writes in place: diagonal into d, off-diagonal into e, and the unit
    Householder vectors into reflectors.
    """

    def __init__(self, max_n: int, eps: float = 1e-30):
        super().__init__()
        self.max_n = max_n
        self.eps = eps

    def forward(self, A: Tensor, d: Tensor, e: Tensor,
                reflectors: Tensor) -> None:
        B, n, _ = A.shape
        eps = self.eps

        for k in range(n - 2):
            tail_len = n - k - 1
            x = A[:, k + 1:, k].clone()

            # Householder target: alpha = -sign(x_0) * ||x||
            sigma = torch.sqrt((x * x).sum(dim=-1, keepdim=True) + eps)
            sign_x0 = torch.where(x[:, 0:1] >= 0,
                                  torch.ones_like(sigma),
                                  -torch.ones_like(sigma))
            alpha = -sign_x0 * sigma

            # Unit reflector v = (x - alpha * e_1) / ||x - alpha * e_1||
            v = x.clone()
            v[:, 0:1] = v[:, 0:1] - alpha
            v_norm = torch.sqrt((v * v).sum(dim=-1, keepdim=True) + eps)
            v = v / v_norm

            reflectors[k, :, :tail_len] = v
            if tail_len < n:
                reflectors[k, :, tail_len:] = 0.0

            # Symmetric rank-2 update of the trailing block:
            # A' = A - 2 v q^T - 2 q v^T,  q = p - (v^T p) v,  p = A v
            sub_A = A[:, k + 1:, k + 1:]
            v_col = v.unsqueeze(-1)

            p = torch.bmm(sub_A, v_col).squeeze(-1)
            vtp = (v * p).sum(dim=-1, keepdim=True)
            q = p - vtp * v

            q_col = q.unsqueeze(-1)
            q_row = q.unsqueeze(-2)
            v_row = v.unsqueeze(-2)

            A[:, k + 1:, k + 1:] -= 2.0 * (v_col @ q_row + q_col @ v_row)
            A[:, k, k + 1] = alpha.squeeze(-1)
            A[:, k + 1, k] = alpha.squeeze(-1)

        for i in range(n):
            d[:, i] = A[:, i, i]
        for i in range(n - 1):
            e[:, i] = A[:, i, i + 1]


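# Illustrative check (hypothetical helper): the tridiagonal T assembled from
# (d, e) is Q^T A Q for orthogonal Q, so it must share A's spectrum.
def _demo_tridiag_spectrum() -> None:
    torch.manual_seed(0)
    B, n = 4, 8
    A = torch.randn(B, n, n)
    A = 0.5 * (A + A.transpose(-2, -1))
    d = torch.empty(B, n)
    e = torch.empty(B, n - 1)
    reflectors = torch.zeros(max(n - 2, 1), B, n)
    HouseholderTridiagonalizer(max_n=n)(A.clone(), d, e, reflectors)
    T = (torch.diag_embed(d) + torch.diag_embed(e, offset=1)
         + torch.diag_embed(e, offset=-1))
    print((torch.linalg.eigvalsh(T) - torch.linalg.eigvalsh(A)).abs().max())  # ~1e-6

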
# =============================================================================
# Phase 2a: Secular Equation Newton Solver (Fixed Budget)
# =============================================================================

class SecularNewtonSolver(nn.Module):
    """
    Solves the secular equation
        f(lam) = 1 + rho * sum_i z_i^2 / (delta_i - lam) = 0
    for all roots at once with a fixed, bisection-safeguarded Newton budget,
    so the loop compiles to a static graph. Assumes rho > 0, so root i lies
    in (delta_i, delta_{i+1}).
    """

    def __init__(self, max_newton: int = DEFAULT_MAX_NEWTON,
                 eps: float = 1e-30, tol: float = 1e-7):
        super().__init__()
        self.max_newton = max_newton
        self.eps = eps
        self.tol = tol

    def forward(self, delta: Tensor, z_sq: Tensor,
                rho: Tensor, mask: Tensor) -> Tensor:
        B, m = delta.shape
        eps = self.eps
        tol = self.tol

        # Bracket each root; the last one is bounded by delta_max + rho*||z||^2
        z_sq_sum = (z_sq * mask).sum(dim=-1, keepdim=True)
        rho_abs = rho.abs().unsqueeze(-1)
        upper_bound = delta[:, -1:] + z_sq_sum * rho_abs + 1.0

        lo = delta + eps
        hi = torch.cat([delta[:, 1:], upper_bound], dim=-1) - eps
        lam = 0.5 * (lo + hi)
        rho_exp = rho.unsqueeze(-1)

        for _step in range(self.max_newton):
            delta_exp = delta.unsqueeze(-1)
            lam_exp = lam.unsqueeze(-2)
            denom = delta_exp - lam_exp

            denom_safe = torch.where(
                denom.abs() < eps,
                torch.full_like(denom, eps) * denom.sign().clamp(min=0.5),
                denom
            )

            z_sq_exp = z_sq.unsqueeze(-1)
            mask_exp = mask.unsqueeze(-1)
            masked_z = z_sq_exp * mask_exp

            terms = masked_z / denom_safe
            f = 1.0 + rho_exp * terms.sum(dim=-2)
            f_prime = rho_exp * (masked_z / (denom_safe * denom_safe)).sum(dim=-2)

            f_prime_safe = torch.where(
                f_prime.abs() < eps,
                torch.full_like(f_prime, eps),
                f_prime
            )
            delta_lam = -f / f_prime_safe
            lam_new = torch.clamp(lam + delta_lam, lo, hi)

            # Maintain the bracket and freeze converged lanes
            f_pos = f > 0
            lo = torch.where(f_pos & mask.bool(), lam, lo)
            hi = torch.where(~f_pos & mask.bool(), lam, hi)

            converged = (f.abs() < tol) | ~mask.bool()
            lam = torch.where(converged, lam, lam_new)

        return lam


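# Illustrative check (hypothetical helper, not called by the solver): the
# eigenvalues of the rank-one update diag(d) + rho * z z^T are exactly the
# roots of the secular function f above, so f evaluated at them is ~0.
def _demo_secular_equation() -> None:
    torch.manual_seed(0)
    d = torch.tensor([0.0, 1.0, 2.0, 4.0])
    z = torch.randn(4)
    rho = 0.7
    lam = torch.linalg.eigvalsh(torch.diag(d) + rho * torch.outer(z, z))
    f = 1.0 + rho * ((z * z).unsqueeze(-1)
                     / (d.unsqueeze(-1) - lam.unsqueeze(0))).sum(dim=0)
    print(f)  # every entry should be near zero

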
# =============================================================================
# Phase 2b: Eigenvectors from Secular Equation
# =============================================================================

def secular_eigenvectors(delta: Tensor, lam: Tensor, z: Tensor,
                         mask: Tensor, eps: float = 1e-30) -> Tensor:
    """Columns v_j with entries z_i / (delta_i - lam_j), normalized."""
    delta_exp = delta.unsqueeze(-1)
    lam_exp = lam.unsqueeze(-2)
    denom = delta_exp - lam_exp

    denom_safe = torch.where(
        denom.abs() < eps,
        torch.full_like(denom, eps) * denom.sign().clamp(min=0.5),
        denom
    )

    z_exp = z.unsqueeze(-1)
    mask_exp = mask.unsqueeze(-1)
    V = (z_exp * mask_exp) / denom_safe
    col_norms = torch.sqrt((V * V).sum(dim=-2, keepdim=True) + eps)
    V = V / col_norms
    return V


# =============================================================================
# Phase 2c: Fixed-Depth Tensor Tree D&C
# =============================================================================

class TensorTreeDC(nn.Module):
    """
    Cuppen-style divide and conquer on the tridiagonal (d, e), unrolled as
    a fixed-depth binary tree over a power-of-two padded size so that every
    level is a single batched tensor op.
    """

    def __init__(self, max_n: int,
                 max_newton: int = DEFAULT_MAX_NEWTON,
                 eps: float = 1e-30, tol: float = 1e-7):
        super().__init__()
        self.padded_n = 1 << math.ceil(math.log2(max(max_n, 2)))
        self.depth = int(math.log2(self.padded_n))
        self.max_n = max_n
        self.eps = eps
        self.secular_solver = SecularNewtonSolver(
            max_newton=max_newton, eps=eps, tol=tol
        )

    def forward(self, d: Tensor, e: Tensor) -> Tuple[Tensor, Tensor]:
        B, n = d.shape
        pn = self.padded_n
        eps = self.eps
        device = d.device
        dtype = d.dtype

        # Pad to the power-of-two size with dummy diagonal entries above the
        # Gershgorin bound of the real block, so the padded eigenvalues sort
        # last and can be sliced off at the end.
        if n < pn:
            d_max = (d.abs().max(dim=-1, keepdim=True).values
                     + 2.0 * e.abs().max(dim=-1, keepdim=True).values + 1.0)
            pad_diag = d_max + torch.arange(1, pn - n + 1, device=device, dtype=dtype).unsqueeze(0)
            d_padded = torch.cat([d, pad_diag], dim=-1)
            e_padded = torch.zeros(B, pn - 1, device=device, dtype=dtype)
            e_padded[:, :n - 1] = e
        else:
            d_padded = d.clone()
            e_padded = e.clone()

        # DOWNWARD PASS: Cuppen split at each block midpoint. Zeroing the
        # coupling e[half-1] and subtracting |rho| from the two touched
        # diagonal entries leaves the rank-one correction |rho| * u u^T
        # with u = (..., 1, sign(rho), ...).
        coupling_rho = []
        current_d = d_padded.clone()
        current_e = e_padded.clone()

        for level in range(self.depth):
            num_sub = 2 ** level
            sub_size = pn // num_sub
            half = sub_size // 2

            cd = current_d.reshape(B, num_sub, sub_size)
            ce = current_e.reshape(B, num_sub, sub_size - 1)

            rho = ce[:, :, half - 1].clone()
            coupling_rho.append(rho)

            cd[:, :, half - 1] = cd[:, :, half - 1] - rho.abs()
            cd[:, :, half] = cd[:, :, half] - rho.abs()
            ce[:, :, half - 1] = 0.0

            left_d = cd[:, :, :half].reshape(B, num_sub * half)
            right_d = cd[:, :, half:].reshape(B, num_sub * half)
            current_d = torch.stack([left_d.reshape(B, num_sub, half),
                                     right_d.reshape(B, num_sub, half)],
                                    dim=2).reshape(B, pn)

            # Per-child off-diagonals: each child of size `half` keeps
            # half - 1 entries, giving num_sub * 2 * (half - 1) total --
            # exactly the length the next level's reshape expects.
            left_e = ce[:, :, :half - 1].reshape(B, num_sub, half - 1)
            right_e = ce[:, :, half:].reshape(B, num_sub, half - 1)
            current_e = torch.stack([left_e, right_e], dim=2).reshape(
                B, num_sub * 2 * (half - 1))

        # BASE: 1x1 blocks are their own eigendecomposition
        base_evals = current_d
        V_current = torch.ones(B, pn, 1, 1, device=device, dtype=dtype)

        # UPWARD PASS: merge sibling blocks via the secular equation
        current_evals = base_evals

        for level in range(self.depth - 1, -1, -1):
            num_sub = 2 ** level
            sub_size = pn // num_sub
            half = sub_size // 2
            child_size = half

            evals_grouped = current_evals.reshape(B, num_sub, 2, child_size)
            left_evals = evals_grouped[:, :, 0, :]
            right_evals = evals_grouped[:, :, 1, :]
            delta = torch.cat([left_evals, right_evals], dim=-1)

            V_grouped = V_current.reshape(B, num_sub, 2, child_size, child_size)
            V_left = V_grouped[:, :, 0, :, :]
            V_right = V_grouped[:, :, 1, :, :]

            rho = coupling_rho[level]
            rho_sign = torch.where(rho >= 0, torch.ones_like(rho),
                                   -torch.ones_like(rho))

            # z = Q^T u with u = (..., 1, sign(rho), ...): last row of the
            # left eigenvector block, sign-flipped first row of the right.
            z_left = V_left[:, :, -1, :]
            z_right = V_right[:, :, 0, :] * rho_sign.unsqueeze(-1)
            z_cat = torch.cat([z_left, z_right], dim=-1)

            delta_sorted, sort_idx = delta.sort(dim=-1)
            z_sorted = z_cat.gather(-1, sort_idx)

            mask = torch.ones(B, num_sub, sub_size, device=device, dtype=dtype)

            gaps = (delta_sorted[:, :, 1:] - delta_sorted[:, :, :-1]).abs()
            degenerate = gaps < (eps * 100)
            avg = 0.5 * (delta_sorted[:, :, :-1] + delta_sorted[:, :, 1:])

            delta_defl = delta_sorted.clone()
            delta_defl[:, :, :-1] = torch.where(degenerate, avg, delta_sorted[:, :, :-1])
            delta_defl[:, :, 1:] = torch.where(degenerate, avg, delta_sorted[:, :, 1:])

            z_defl = z_sorted.clone()
            defl_kill = torch.ones_like(z_sorted)
            defl_kill[:, :, 1:] = torch.where(
                degenerate, torch.zeros_like(gaps), torch.ones_like(gaps))
            z_defl = z_defl * defl_kill

            # Solve with |rho| > 0 so roots interlace above the deltas,
            # matching the solver's bracketing.
            z_sq = z_defl * z_defl
            Bns = B * num_sub
            new_evals_flat = self.secular_solver(
                delta_defl.reshape(Bns, sub_size),
                z_sq.reshape(Bns, sub_size),
                rho.abs().reshape(Bns),
                mask.reshape(Bns, sub_size),
            )
            new_evals = new_evals_flat.reshape(B, num_sub, sub_size)

            V_secular_flat = secular_eigenvectors(
                delta_defl.reshape(Bns, sub_size),
                new_evals_flat,
                z_defl.reshape(Bns, sub_size),
                mask.reshape(Bns, sub_size),
                eps=eps
            )
            V_secular = V_secular_flat.reshape(B, num_sub, sub_size, sub_size)

            inv_sort = sort_idx.argsort(dim=-1)
            inv_exp = inv_sort.unsqueeze(-1).expand_as(V_secular)
            V_unsorted = V_secular.gather(-2, inv_exp)

            V_block = torch.zeros(B, num_sub, sub_size, sub_size,
                                  device=device, dtype=dtype)
            V_block[:, :, :half, :half] = V_left
            V_block[:, :, half:, half:] = V_right

            V_merged = torch.bmm(
                V_block.reshape(Bns, sub_size, sub_size),
                V_unsorted.reshape(Bns, sub_size, sub_size)
            ).reshape(B, num_sub, sub_size, sub_size)

            current_evals = new_evals.reshape(B, pn)
            V_current = V_merged

        eigenvalues = current_evals
        eigenvectors = V_current.squeeze(1)

        sorted_evals, sort_perm = eigenvalues.sort(dim=-1)
        sort_exp = sort_perm.unsqueeze(-2).expand_as(eigenvectors)
        sorted_evecs = eigenvectors.gather(-1, sort_exp)

        if n < pn:
            sorted_evals = sorted_evals[:, :n]
            sorted_evecs = sorted_evecs[:, :n, :n]

        return sorted_evals, sorted_evecs


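# Illustrative check of the Cuppen split used above (hypothetical helper):
# zeroing the middle off-diagonal e and subtracting |rho| from the two
# coupled diagonal entries expresses T as block-diag(T1, T2) + |rho| * u u^T.
def _demo_cuppen_split() -> None:
    d = torch.tensor([1.0, 2.0, 3.0, 4.0])
    e = torch.tensor([0.5, -0.8, 0.3])
    T = torch.diag(d) + torch.diag(e, 1) + torch.diag(e, -1)
    rho = e[1]  # coupling element at the midpoint
    d2 = d.clone(); d2[1] -= rho.abs(); d2[2] -= rho.abs()
    e2 = e.clone(); e2[1] = 0.0
    T_split = torch.diag(d2) + torch.diag(e2, 1) + torch.diag(e2, -1)
    u = torch.tensor([0.0, 1.0, torch.sign(rho).item(), 0.0])
    print((T - (T_split + rho.abs() * torch.outer(u, u))).abs().max())  # 0.0

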
# =============================================================================
# Phase 2 (alternate): Jacobi for small n
# =============================================================================

class JacobiEigh(nn.Module):
    """
    Jacobi eigenvalue algorithm for small symmetric matrices.
    Fixed sweep count, fully vectorized, zero branches.

    COMPILE FIX: Pair indices stored as plain Python lists (not tensors).
    Dynamo sees these as constants -- no SymInt issues.
    """

    def __init__(self, max_n: int,
                 max_sweeps: int = DEFAULT_MAX_JACOBI_SWEEPS,
                 eps: float = 1e-30):
        super().__init__()
        self.max_n = max_n
        self.max_sweeps = max_sweeps
        self.eps = eps

        # CRITICAL: plain Python lists, NOT registered buffers.
        # Dynamo traces these as compile-time constants.
        pairs = []
        for p in range(max_n):
            for q in range(p + 1, max_n):
                pairs.append((p, q))
        self._pairs_p: list[int] = [p for p, q in pairs]
        self._pairs_q: list[int] = [q for p, q in pairs]
        self._n_pairs: int = len(pairs)

    def forward(self, A: Tensor) -> Tuple[Tensor, Tensor]:
        """
        A: [B, n, n] symmetric
        Returns: (eigenvalues [B, n] ascending, eigenvectors [B, n, n])
        """
        B, n, _ = A.shape
        eps = self.eps

        W = A.clone()
        V = torch.eye(n, device=A.device, dtype=A.dtype).unsqueeze(0).expand(B, -1, -1).clone()

        for _sweep in range(self.max_sweeps):
            for idx in range(self._n_pairs):
                # Plain Python ints -- Dynamo sees these as constants
                p: int = self._pairs_p[idx]
                q: int = self._pairs_q[idx]

                app = W[:, p, p]
                aqq = W[:, q, q]
                apq = W[:, p, q]

                # Givens rotation angle
                two_apq = 2.0 * apq
                diff = aqq - app

                # Safe division: sign-preserving eps guard
                abs_two_apq = two_apq.abs().clamp(min=eps)
                sign_two_apq = torch.where(two_apq >= 0,
                                           torch.ones_like(two_apq),
                                           -torch.ones_like(two_apq))
                tau = diff / (abs_two_apq * sign_two_apq)

                tau_sign = torch.where(tau >= 0,
                                       torch.ones_like(tau),
                                       -torch.ones_like(tau))
                t = tau_sign / (tau.abs() + torch.sqrt(1.0 + tau * tau))

                # Zero rotation when off-diagonal is already negligible
                skip = (apq.abs() < eps).float()
                t = t * (1.0 - skip)

                c = 1.0 / torch.sqrt(1.0 + t * t)
                s = t * c

                # ── Rotate W columns p, q ──
                Wp = W[:, :, p].clone()
                Wq = W[:, :, q].clone()
                c_col = c.unsqueeze(-1)
                s_col = s.unsqueeze(-1)
                W[:, :, p] = c_col * Wp - s_col * Wq
                W[:, :, q] = s_col * Wp + c_col * Wq

                # ── Rotate W rows p, q ──
                Wp = W[:, p, :].clone()
                Wq = W[:, q, :].clone()
                W[:, p, :] = c_col * Wp - s_col * Wq
                W[:, q, :] = s_col * Wp + c_col * Wq

                # ── Exact diagonal repair (prevents accumulation drift) ──
                W[:, p, q] = 0.0
                W[:, q, p] = 0.0
                W[:, p, p] = app - t * apq
                W[:, q, q] = aqq + t * apq

                # ── Accumulate eigenvectors ──
                Vp = V[:, :, p].clone()
                Vq = V[:, :, q].clone()
                V[:, :, p] = c_col * Vp - s_col * Vq
                V[:, :, q] = s_col * Vp + c_col * Vq

        # ── Newton-Schulz re-orthogonalization ──
        # 2 iterations: orth error 1e-3 -> ~1e-12 via bmm (GPU-native)
        V = orthogonalize_ns(V, n_iter=2)

        # ── Extract and sort ──
        eigenvalues = torch.diagonal(W, dim1=-2, dim2=-1)
        sorted_evals, sort_perm = eigenvalues.sort(dim=-1)
        sort_exp = sort_perm.unsqueeze(-2).expand_as(V)
        sorted_evecs = V.gather(-1, sort_exp)

        return sorted_evals, sorted_evecs


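# Illustrative check (hypothetical helper): for a single 2x2 matrix, one
# Jacobi sweep is exact, and the returned basis is orthonormal.
def _demo_jacobi_rotation() -> None:
    A = torch.tensor([[[2.0, 1.0], [1.0, 3.0]]])
    evals, evecs = JacobiEigh(max_n=2, max_sweeps=1)(A)
    print(evals)  # approx [[1.382, 3.618]], i.e. (5 -/+ sqrt(5)) / 2
    print(torch.bmm(evecs.transpose(-2, -1), evecs))  # approx identity

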
# =============================================================================
# Phase 3: Householder Back-Accumulation
# =============================================================================

class HouseholderBackAccumulate(nn.Module):
    """
    Applies the Phase 1 reflectors to the tridiagonal eigenvectors Z in
    reverse order, i.e. forms V = Q Z so the columns are eigenvectors of
    the original A.
    """

    def __init__(self, max_n: int, eps: float = 1e-30):
        super().__init__()
        self.max_n = max_n
        self.eps = eps

    def forward(self, reflectors: Tensor, Z: Tensor, n: int) -> Tensor:
        V = Z.clone()
        for k in range(n - 3, -1, -1):
            tail_len = n - k - 1
            v = reflectors[k, :, :tail_len]
            v_col = v.unsqueeze(-1)
            V_sub = V[:, k + 1:, :]
            vtV = torch.bmm(v_col.transpose(-2, -1), V_sub)
            V[:, k + 1:, :] = V_sub - 2.0 * v_col @ vtV
        return V


# =============================================================================
# Validation
# =============================================================================

class EighValidator(nn.Module):
    """Relative residual ||A V - V diag(lam)|| / ||A|| and orthogonality error ||V^T V - I||."""

    def forward(self, A: Tensor, eigenvalues: Tensor,
                eigenvectors: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        B, n, _ = A.shape
        AV = torch.bmm(A, eigenvectors)
        VL = eigenvectors * eigenvalues.unsqueeze(-2)
        residual = AV - VL
        A_norm = torch.linalg.norm(A.reshape(B, -1), dim=-1).clamp(min=1e-30)
        residual_norm = torch.linalg.norm(residual.reshape(B, -1), dim=-1) / A_norm

        VtV = torch.bmm(eigenvectors.transpose(-2, -1), eigenvectors)
        I = torch.eye(n, device=A.device, dtype=A.dtype).unsqueeze(0)
        orth_err = torch.linalg.norm((VtV - I).reshape(B, -1), dim=-1)

        return residual_norm, orth_err, residual_norm.max()


# =============================================================================
# Top-Level: CompiledEigh
# =============================================================================

class CompiledEigh(nn.Module):
    """
    Drop-in replacement for torch.linalg.eigh.

    Usage:
        solver = CompiledEigh(max_n=6)
        solver = torch.compile(solver, fullgraph=True)
        eigenvalues, eigenvectors = solver(A)
    """

    def __init__(self, max_n: int,
                 use_jacobi: Optional[bool] = None,
                 max_newton: int = DEFAULT_MAX_NEWTON,
                 max_jacobi_sweeps: int = DEFAULT_MAX_JACOBI_SWEEPS,
                 eps: float = 1e-30, tol: float = 1e-7):
        super().__init__()
        self.max_n = max_n
        self.eps = eps

        if use_jacobi is None:
            use_jacobi = (max_n <= JACOBI_THRESHOLD)
        self.use_jacobi = use_jacobi

        if use_jacobi:
            self.jacobi = JacobiEigh(
                max_n=max_n, max_sweeps=max_jacobi_sweeps, eps=eps)
        else:
            self.tridiag = HouseholderTridiagonalizer(max_n=max_n, eps=eps)
            self.dc = TensorTreeDC(
                max_n=max_n, max_newton=max_newton, eps=eps, tol=tol)
            self.back_accum = HouseholderBackAccumulate(max_n=max_n, eps=eps)

        self.validator = EighValidator()

    def forward(self, A: Tensor, validate: bool = False
                ) -> Tuple[Tensor, Tensor]:
        B, n, _ = A.shape

        if self.use_jacobi:
            eigenvalues, eigenvectors = self.jacobi(A)
        else:
            A_work = A.clone()
            d = torch.empty(B, n, device=A.device, dtype=A.dtype)
            e = torch.empty(B, n - 1, device=A.device, dtype=A.dtype)
            reflectors = torch.zeros(max(n - 2, 1), B, n,
                                     device=A.device, dtype=A.dtype)
            self.tridiag(A_work, d, e, reflectors)
            eigenvalues, Z = self.dc(d, e)
            eigenvectors = self.back_accum(reflectors, Z, n)
            # Newton-Schulz re-orthogonalization for D&C path
            eigenvectors = orthogonalize_ns(eigenvectors, n_iter=2)

        if validate:
            # Host-side sync (print/.item()): use only outside fullgraph compile
            res_norm, orth_err, max_err = self.validator(A, eigenvalues, eigenvectors)
            print(f"[CompiledEigh] max residual: {max_err.item():.2e}, "
                  f"mean orth err: {orth_err.mean().item():.2e}")

        return eigenvalues, eigenvectors


# =============================================================================
# Functional API
# =============================================================================

# Solver cache keyed by (n, device, dtype); the modules are parameter-free
_cached_solvers = {}

def compiled_eigh(A: Tensor, validate: bool = False) -> Tuple[Tensor, Tensor]:
    B, n, _ = A.shape
    key = (n, A.device, A.dtype)
    if key not in _cached_solvers:
        _cached_solvers[key] = CompiledEigh(max_n=n).to(A.device)
    return _cached_solvers[key](A, validate=validate)


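# Minimal usage sketch for the functional API (hypothetical helper; shapes
# assumed [B, n, n], input symmetrized by the caller):
def _demo_compiled_eigh() -> None:
    A = torch.randn(64, 6, 6)
    A = 0.5 * (A + A.transpose(-2, -1))
    evals, evecs = compiled_eigh(A)
    print(evals.shape, evecs.shape)  # torch.Size([64, 6]) torch.Size([64, 6, 6])

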
"""
CompiledEigh -- Colab GPU Benchmark v3
Fixes:
    v2: Jacobi pairs as plain Python lists (Dynamo compile fix), sweeps 6 -> 10
    v3: Replaced Gram-Schmidt with Newton-Schulz orthogonalization (all-bmm),
        disabled TF32 to ensure fp32 precision on Blackwell
"""

import torch
import time
import gc
import sys

# ── Ensure full fp32 precision on Ampere/Hopper/Blackwell ──
# TF32 uses a 10-bit mantissa for matmul, which can degrade orthogonality
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.set_float32_matmul_precision('highest')


def sync():
    if torch.cuda.is_available():
        torch.cuda.synchronize()


def gpu_timer(fn, warmup=10, repeats=200):
    for _ in range(warmup):
        fn()
    sync()
    start = time.perf_counter()
    for _ in range(repeats):
        fn()
    sync()
    return (time.perf_counter() - start) / repeats


def make_symmetric_batch(B, n, device, dtype=torch.float32):
    R = torch.randn(B, n, n, device=device, dtype=dtype)
    return (R + R.transpose(-2, -1)) / 2.0


def make_cm_like_batch(B, n, device, dtype=torch.float32):
    points = torch.randn(B, n, n, device=device, dtype=dtype)
    points = points / (points.norm(dim=-1, keepdim=True) + 1e-8)
    return torch.bmm(points, points.transpose(-2, -1)) * 0.3


def fmt_time(seconds):
    if seconds < 1e-3:
        return f"{seconds*1e6:.1f} us"
    elif seconds < 1.0:
        return f"{seconds*1e3:.2f} ms"
    return f"{seconds:.3f} s"


# ─── Test 0: Newton-Schulz Diagnostic ───

def test_ns_diagnostic(device):
    """Verify Newton-Schulz orthogonalization works on GPU independently."""
    print("\n" + "=" * 70)
    print(" TEST 0: NEWTON-SCHULZ DIAGNOSTIC")
    print("=" * 70)

    for n in [5, 6, 8]:
        B = 1024
        # Create a nearly-orthogonal matrix (simulating Jacobi output)
        Q, _ = torch.linalg.qr(torch.randn(B, n, n, device=device))
        # Perturb to ~1e-3 orthogonality error
        noise = torch.randn(B, n, n, device=device) * 1e-3
        V_dirty = Q + noise

        I_n = torch.eye(n, device=device).unsqueeze(0)

        # Before NS
        VtV_before = torch.bmm(V_dirty.transpose(-2, -1), V_dirty)
        orth_before = torch.linalg.norm((VtV_before - I_n).reshape(B, -1), dim=-1).max().item()

        # After NS (2 iterations)
        V_clean = orthogonalize_ns(V_dirty, n_iter=2)
        VtV_after = torch.bmm(V_clean.transpose(-2, -1), V_clean)
        orth_after = torch.linalg.norm((VtV_after - I_n).reshape(B, -1), dim=-1).max().item()

        # After NS (3 iterations for comparison)
        V_clean3 = orthogonalize_ns(V_dirty, n_iter=3)
        VtV_after3 = torch.bmm(V_clean3.transpose(-2, -1), V_clean3)
        orth_after3 = torch.linalg.norm((VtV_after3 - I_n).reshape(B, -1), dim=-1).max().item()

        print(f" n={n}: before={orth_before:.2e} "
              f"after(2iter)={orth_after:.2e} "
              f"after(3iter)={orth_after3:.2e}")

    # Also test with actual Jacobi output
    print("\n --- With actual Jacobi output ---")
    for n in [5, 6]:
        B = 2048
        A = make_symmetric_batch(B, n, device)
        solver = JacobiEigh(max_n=n, max_sweeps=10).to(device)

        # Run the Jacobi sweeps WITHOUT the NS cleanup
        W = A.clone()
        V = torch.eye(n, device=device).unsqueeze(0).expand(B, -1, -1).clone()
        for _sweep in range(solver.max_sweeps):
            for idx in range(solver._n_pairs):
                p, q = solver._pairs_p[idx], solver._pairs_q[idx]
                app, aqq, apq = W[:, p, p], W[:, q, q], W[:, p, q]
                two_apq = 2.0 * apq
                diff = aqq - app
                abs_2apq = two_apq.abs().clamp(min=1e-30)
                sign_2apq = torch.where(two_apq >= 0,
                                        torch.ones_like(two_apq), -torch.ones_like(two_apq))
                tau = diff / (abs_2apq * sign_2apq)
                tau_sign = torch.where(tau >= 0,
                                       torch.ones_like(tau), -torch.ones_like(tau))
                t = tau_sign / (tau.abs() + torch.sqrt(1.0 + tau * tau))
                skip = (apq.abs() < 1e-30).float()
                t = t * (1.0 - skip)
                c = 1.0 / torch.sqrt(1.0 + t * t)
                s = t * c
                c_col, s_col = c.unsqueeze(-1), s.unsqueeze(-1)
                Wp = W[:, :, p].clone(); Wq = W[:, :, q].clone()
                W[:, :, p] = c_col * Wp - s_col * Wq
                W[:, :, q] = s_col * Wp + c_col * Wq
                Wp = W[:, p, :].clone(); Wq = W[:, q, :].clone()
                W[:, p, :] = c_col * Wp - s_col * Wq
                W[:, q, :] = s_col * Wp + c_col * Wq
                W[:, p, q] = 0.0; W[:, q, p] = 0.0
                W[:, p, p] = app - t * apq
                W[:, q, q] = aqq + t * apq
                Vp = V[:, :, p].clone(); Vq = V[:, :, q].clone()
                V[:, :, p] = c_col * Vp - s_col * Vq
                V[:, :, q] = s_col * Vp + c_col * Vq

        I_n = torch.eye(n, device=device).unsqueeze(0)
        VtV = torch.bmm(V.transpose(-2, -1), V)
        orth_raw = torch.linalg.norm((VtV - I_n).reshape(B, -1), dim=-1).max().item()

        V_ns = orthogonalize_ns(V, n_iter=2)
        VtV_ns = torch.bmm(V_ns.transpose(-2, -1), V_ns)
        orth_ns = torch.linalg.norm((VtV_ns - I_n).reshape(B, -1), dim=-1).max().item()

        print(f" Jacobi raw n={n}: orth={orth_raw:.2e} after NS(2)={orth_ns:.2e}")


# ─── Test 1: Accuracy ───

def test_accuracy(device):
    print("\n" + "=" * 70)
    print(" TEST 1: ACCURACY vs torch.linalg.eigh")
    print("=" * 70)

    validator = EighValidator()
    configs = [
        (3, 4096, "3x3 small"),
        (5, 4096, "5x5 CM matrix size"),
        (6, 4096, "6x6 pentachoron bordered"),
        (8, 2048, "8x8 padded CM"),
        (12, 1024, "12x12 medium"),
        (16, 512, "16x16 Jacobi boundary"),
    ]

    all_pass = True
    for n, B, label in configs:
        A = make_symmetric_batch(B, n, device)
        ref_vals, ref_vecs = torch.linalg.eigh(A)

        solver = CompiledEigh(max_n=n).to(device)
        our_vals, our_vecs = solver(A)

        val_err = (our_vals - ref_vals).abs().max().item()
        val_mean = (our_vals - ref_vals).abs().mean().item()

        dots = torch.bmm(ref_vecs.transpose(-2, -1), our_vecs)
        alignment = dots.abs().max(dim=-1).values.min().item()

        res_norm, orth_err, max_res = validator(A, our_vals, our_vecs)
        max_orth = orth_err.max().item()

        # Thresholds: eigenvalue 1e-3, alignment 0.999, orthogonality 1e-4
        ok = val_err < 1e-3 and alignment > 0.999 and max_orth < 1e-4
        if not ok:
            all_pass = False

        print(f"\n [{'PASS' if ok else 'FAIL'}] {label} (n={n}, B={B})")
        print(f" eigenvalue err max={val_err:.2e} mean={val_mean:.2e}")
        print(f" eigvec alignment min={alignment:.8f}")
        print(f" residual norm max={max_res.item():.2e}")
        print(f" orthogonality max={max_orth:.2e}")

    print("\n --- CM-like spectral distribution ---")
    for n in [5, 6]:
        A = make_cm_like_batch(2048, n, device)
        ref_vals, _ = torch.linalg.eigh(A)
        solver = CompiledEigh(max_n=n).to(device)
        our_vals, our_vecs = solver(A)
        val_err = (our_vals - ref_vals).abs().max().item()
        res_norm, orth_err, max_res = validator(A, our_vals, our_vecs)
        print(f" CM-like n={n}: val_err={val_err:.2e} "
              f"res={max_res.item():.2e} orth={orth_err.max().item():.2e}")

    return all_pass


# ─── Test 2: torch.compile fullgraph ───

def test_compile(device):
    print("\n" + "=" * 70)
    print(" TEST 2: torch.compile(fullgraph=True)")
    print("=" * 70)

    results = {}
    for n, B, label in [(5, 1024, "5x5"), (6, 1024, "6x6"), (8, 512, "8x8")]:
        A = make_symmetric_batch(B, n, device)
        solver = CompiledEigh(max_n=n).to(device)

        try:
            compiled_solver = torch.compile(solver, fullgraph=True)
            vals, vecs = compiled_solver(A)
            sync()
            ref_vals, _ = torch.linalg.eigh(A)
            err = (vals - ref_vals).abs().max().item()
            results[label] = ("PASS", err)
            print(f" [{label}] fullgraph=True SUCCESS (val_err={err:.2e})")
        except Exception as e:
            results[label] = ("FAIL", str(e)[:200])
            print(f" [{label}] COMPILE FAILED: {str(e)[:200]}")

    return all(v[0] == "PASS" for v in results.values())


# ─── Test 3: Throughput ───

def test_benchmark(device):
    print("\n" + "=" * 70)
    print(" TEST 3: GPU THROUGHPUT BENCHMARK")
    print("=" * 70)
    print(f" Device: {torch.cuda.get_device_name(0)}")
    print(" Timing: 10 warmup + 200 repeats\n")

    configs = [
        (5, 1024, "CM 5x5 B=1024"),
        (5, 4096, "CM 5x5 B=4096"),
        (5, 8192, "CM 5x5 B=8192"),
        (6, 1024, "CM 6x6 B=1024"),
        (6, 4096, "CM 6x6 B=4096"),
        (6, 8192, "CM 6x6 B=8192"),
        (8, 2048, "8x8 B=2048"),
        (16, 1024, "16x16 B=1024"),
    ]

    print(f" {'Config':<22} {'eigh ref':>10} {'ours eager':>12} "
          f"{'ours compiled':>14} {'vs ref':>8}")
    print(f" {'-'*22} {'-'*10} {'-'*12} {'-'*14} {'-'*8}")

    for n, B, label in configs:
        A = make_symmetric_batch(B, n, device)

        ref_time = gpu_timer(lambda: torch.linalg.eigh(A))

        solver = CompiledEigh(max_n=n).to(device)
        eager_time = gpu_timer(lambda: solver(A))

        try:
            compiled_solver = torch.compile(solver, fullgraph=True)
            for _ in range(5):
                compiled_solver(A)
            sync()
            compiled_time = gpu_timer(lambda: compiled_solver(A))
            compiled_str = fmt_time(compiled_time)
            speedup = ref_time / compiled_time
            speedup_str = f"{speedup:.2f}x"
        except Exception:
            compiled_str = "FAIL"
            speedup_str = "N/A"

        print(f" {label:<22} {fmt_time(ref_time):>10} "
              f"{fmt_time(eager_time):>12} {compiled_str:>14} {speedup_str:>8}")

    print("\n --- High batch stress test ---")
    for n in [5, 6]:
        for B in [16384, 32768]:
            try:
                A = make_symmetric_batch(B, n, device)
                solver = CompiledEigh(max_n=n).to(device)
                compiled_solver = torch.compile(solver, fullgraph=True)
                for _ in range(3):
                    compiled_solver(A)
                sync()
                t = gpu_timer(lambda: compiled_solver(A), warmup=5, repeats=100)
                ref_t = gpu_timer(lambda: torch.linalg.eigh(A), warmup=5, repeats=100)
                print(f" n={n} B={B}: compiled={fmt_time(t)} ref={fmt_time(ref_t)} "
                      f"ratio={ref_t/t:.2f}x throughput={B/t:.0f}/sec")
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    print(f" n={n} B={B}: OOM")
                    torch.cuda.empty_cache()
                else:
                    raise


# ─── Test 4: Autograd ───

def test_autograd(device):
    print("\n" + "=" * 70)
    print(" TEST 4: AUTOGRAD BACKWARD")
    print("=" * 70)

    for n, B in [(5, 512), (6, 512)]:
        A_ref = make_symmetric_batch(B, n, device).requires_grad_(True)
        vals_ref, vecs_ref = torch.linalg.eigh(A_ref)
        (vals_ref.sum() + (vecs_ref ** 2).sum()).backward()
        grad_ref = A_ref.grad.clone()

        # Eager backward
        A_e = A_ref.detach().clone().requires_grad_(True)
        solver = CompiledEigh(max_n=n).to(device)
        try:
            vals_e, vecs_e = solver(A_e)
            (vals_e.sum() + (vecs_e ** 2).sum()).backward()
            err_e = (A_e.grad - grad_ref).abs().max().item()
            rel_e = err_e / (grad_ref.abs().max().item() + 1e-30)
            print(f" [{'PASS' if rel_e < 0.1 else 'WARN'}] n={n} eager backward: "
                  f"grad_err={err_e:.2e} rel={rel_e:.2e}")
        except Exception as e:
            print(f" [FAIL] n={n} eager backward: {e}")

        # Compiled backward (may break -- forward fullgraph is the key win)
        A_c = A_ref.detach().clone().requires_grad_(True)
        try:
            compiled_solver = torch.compile(solver)
            vals_c, vecs_c = compiled_solver(A_c)
            (vals_c.sum() + (vecs_c ** 2).sum()).backward()
            err_c = (A_c.grad - grad_ref).abs().max().item()
            rel_c = err_c / (grad_ref.abs().max().item() + 1e-30)
            print(f" [{'PASS' if rel_c < 0.1 else 'WARN'}] n={n} compiled backward: "
                  f"grad_err={err_c:.2e} rel={rel_c:.2e}")
        except Exception as e:
            print(f" [INFO] n={n} compiled backward: {str(e)[:150]}")
            print(" (forward fullgraph is the main win)")


# ─── Test 5: VRAM ───

def test_vram(device):
    print("\n" + "=" * 70)
    print(" TEST 5: VRAM USAGE")
    print("=" * 70)

    for n, B in [(5, 4096), (6, 4096), (6, 8192), (5, 8192)]:
        torch.cuda.empty_cache()
        gc.collect()
        torch.cuda.reset_peak_memory_stats()
        base_mem = torch.cuda.memory_allocated()

        A = make_symmetric_batch(B, n, device)
        solver = CompiledEigh(max_n=n).to(device)
        vals, vecs = solver(A)

        peak_mem = torch.cuda.max_memory_allocated()
        delta_mb = (peak_mem - base_mem) / (1024 ** 2)
        print(f" n={n} B={B}: peak delta = {delta_mb:.1f} MB")

        del A, solver, vals, vecs
        torch.cuda.empty_cache()
        gc.collect()


# ─── Main ───

def main():
    print("=" * 70)
    print(" CompiledEigh v3 -- GPU Benchmark Suite")
    print("=" * 70)

    if not torch.cuda.is_available():
        print("\n No CUDA. Run on Colab with A100/H100.")
        sys.exit(1)

    device = torch.device('cuda')
    print(f"\n GPU: {torch.cuda.get_device_name(0)}")
    print(f" CUDA: {torch.version.cuda}")
    print(f" PyTorch: {torch.__version__}")
    mem_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3)
    print(f" VRAM: {mem_gb:.1f} GB")
    print(f" TF32 matmul: {torch.backends.cuda.matmul.allow_tf32}")
    print(f" float32 precision: {torch.get_float32_matmul_precision()}")

    test_ns_diagnostic(device)
    acc_ok = test_accuracy(device)
    compile_ok = test_compile(device)
    test_benchmark(device)
    test_autograd(device)
    test_vram(device)

    print("\n" + "=" * 70)
    print(" SUMMARY")
    print("=" * 70)
    print(f" Accuracy: {'PASS' if acc_ok else 'FAIL'}")
    print(f" Compile: {'PASS' if compile_ok else 'FAIL'}")
    print("=" * 70)


if __name__ == '__main__':
    main()