omrifahn commited on
Commit
4325287
·
verified ·
1 Parent(s): 96080d7

Upload src/kfac_editor.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. src/kfac_editor.py +524 -0
src/kfac_editor.py ADDED
@@ -0,0 +1,524 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ K-FAC Weight Editor
3
+
4
+ Applies K-FAC-based weight editing to suppress memorization by
5
+ removing low-curvature weight components.
6
+
7
+ Based on: "From Memorization to Reasoning in the Spectrum of Loss Curvature"
8
+ """
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from torch import Tensor
13
+ from typing import Optional, Literal
14
+ from dataclasses import dataclass
15
+ import numpy as np
16
+
17
+
18
@dataclass
class EditConfig:
    """Configuration for K-FAC weight editing.

    Consumed by ``KFACEditor``; each field tunes one aspect of the editing
    procedure (component selection, scoring formula, placement, write-back).
    """

    # Energy threshold: keep the top fraction of total curvature "energy"
    # (cumulative importance mass), e.g. 0.6 retains 60% of the mass.
    energy_threshold: float = 0.6

    # Formula for computing per-component importance
    # 'original': Π_ij = λ_i * μ_j            (eigenvalue outer product)
    # 'modified': Π_ij = λ_i * μ_j * |C_ij|²  (also weighs component magnitude)
    formula: Literal["original", "modified"] = "original"

    # Device for computation.
    # NOTE(review): the editor performs its eigendecomposition and basis
    # transforms on CPU regardless of this value — confirm whether any
    # caller still relies on this field.
    device: str = "cuda"

    # Whether to write edited weights back into the model in-place.
    inplace: bool = True
35
+
36
+
37
class KFACEditor:
    """
    Edits model weights using K-FAC-based compression to suppress memorization.

    The editing procedure:
    1. Eigendecompose A and G matrices from K-FAC
    2. Transform weights to curvature basis: C = U_G^T @ W @ U_A
    3. Compute importance scores Π_ij for each component
    4. Select top components by cumulative energy threshold
    5. Reconstruct weights: W_edited = U_G @ (C ⊙ M) @ U_A^T

    The key insight is that high-curvature components (high Π) correspond
    to generalizing directions, while low-curvature components are used
    for memorization.
    """

    def __init__(
        self,
        model: nn.Module,
        kfac_stats: dict[str, tuple[Tensor, Tensor]],
        config: Optional["EditConfig"] = None,
    ):
        """
        Initialize the editor.

        Args:
            model: The model to edit
            kfac_stats: Dictionary mapping layer names to (A, G) tuples,
                where A is the activation covariance and G the gradient
                covariance of that layer
            config: Edit configuration (defaults to ``EditConfig()``)
        """
        self.model = model
        self.kfac_stats = kfac_stats
        self.config = config or EditConfig()

        # Cache of eigendecompositions keyed by layer name; eigh is the
        # expensive step, so it is only done once per layer.
        self._eigen_cache: dict[str, dict] = {}

        # Per-layer statistics about applied edits, keyed by layer name.
        self.edit_stats: dict[str, dict] = {}

    def eigendecompose(
        self,
        A: Tensor,
        G: Tensor,
        regularization: float = 1e-6,
    ) -> dict:
        """
        Compute eigendecomposition of K-FAC factors.

        Args:
            A: Activation covariance matrix (d_in x d_in)
            G: Gradient covariance matrix (d_out x d_out)
            regularization: Small value added to the diagonal for numerical
                stability

        Returns:
            Dictionary with eigenvalues ("lambda_A", "lambda_G", sorted
            descending) and eigenvectors ("U_A", "U_G", columns matching
            the eigenvalue order), all float32 on CPU.
        """
        # Compute on CPU in float32: some accelerator backends (e.g. MPS)
        # do not support eigh, and float32 is stabler than half precision.
        A = A.to(device="cpu", dtype=torch.float32)
        G = G.to(device="cpu", dtype=torch.float32)

        # Symmetrize: the factors should already be symmetric, but floating
        # point accumulation can leave tiny asymmetries.
        A = (A + A.T) / 2
        G = (G + G.T) / 2

        # Tikhonov-style diagonal regularization.
        A = A + regularization * torch.eye(A.shape[0], device=A.device, dtype=A.dtype)
        G = G + regularization * torch.eye(G.shape[0], device=G.device, dtype=G.dtype)

        # eigh returns eigenvalues in ascending order; re-sort descending so
        # index 0 is the highest-curvature direction.
        lambda_A, U_A = torch.linalg.eigh(A)
        lambda_G, U_G = torch.linalg.eigh(G)

        idx_A = torch.argsort(lambda_A, descending=True)
        idx_G = torch.argsort(lambda_G, descending=True)

        lambda_A = lambda_A[idx_A]
        U_A = U_A[:, idx_A]
        lambda_G = lambda_G[idx_G]
        U_G = U_G[:, idx_G]

        # Covariances are PSD in exact arithmetic; clamp any slightly
        # negative eigenvalues produced by numerical error.
        lambda_A = torch.clamp(lambda_A, min=0)
        lambda_G = torch.clamp(lambda_G, min=0)

        return {
            "lambda_A": lambda_A,  # (d_in,) - μ in paper notation
            "U_A": U_A,  # (d_in, d_in)
            "lambda_G": lambda_G,  # (d_out,) - λ in paper notation
            "U_G": U_G,  # (d_out, d_out)
        }

    def transform_to_curvature_basis(
        self,
        W: Tensor,
        U_A: Tensor,
        U_G: Tensor,
    ) -> Tensor:
        """
        Transform weights to curvature basis: C = U_G^T @ W @ U_A.

        Each C_ij represents the component of W along the direction
        defined by the i-th gradient eigenvector and j-th activation
        eigenvector.
        """
        return U_G.T @ W @ U_A

    def compute_importance_original(
        self,
        lambda_A: Tensor,
        lambda_G: Tensor,
    ) -> Tensor:
        """
        Compute importance scores using the original formula.

        Π_ij = λ_i * μ_j

        This is the outer product of the eigenvalues.
        """
        # lambda_G: (d_out,) -> λ_i, lambda_A: (d_in,) -> μ_j.
        # Result: (d_out, d_in), matching the weight matrix layout.
        return torch.outer(lambda_G, lambda_A)

    def compute_importance_modified(
        self,
        lambda_A: Tensor,
        lambda_G: Tensor,
        C: Tensor,
    ) -> Tensor:
        """
        Compute importance scores using the modified formula.

        Π_ij = λ_i * μ_j * |C_ij|²

        This weights the curvature by the actual magnitude of the weight
        component in that direction.
        """
        # Base importance from the eigenvalue outer product...
        Pi_base = torch.outer(lambda_G, lambda_A)
        # ...scaled by the squared magnitude of the transformed weights.
        return Pi_base * (C ** 2)

    def compute_energy_mask(
        self,
        importance: Tensor,
        threshold: float,
    ) -> tuple[Tensor, dict]:
        """
        Compute binary mask keeping top components by cumulative energy.

        Args:
            importance: Importance scores (d_out, d_in)
            threshold: Fraction of total energy to retain (e.g., 0.6 = 60%)

        Returns:
            Tuple of (boolean mask with the same shape as ``importance``,
            statistics dict)
        """
        flat_importance = importance.flatten()
        total_energy = flat_importance.sum()

        # Rank components by importance, highest first.
        sorted_vals, sorted_indices = torch.sort(flat_importance, descending=True)
        cumsum = torch.cumsum(sorted_vals, dim=0)

        # First index where the running energy reaches the target; +1 makes
        # the cut inclusive, and the clamp guarantees at least one component
        # survives (and at most all of them).
        target_energy = threshold * total_energy
        cutoff_idx = torch.searchsorted(cumsum, target_energy).item()
        cutoff_idx = max(1, min(cutoff_idx + 1, len(flat_importance)))

        mask = torch.zeros_like(flat_importance, dtype=torch.bool)
        mask[sorted_indices[:cutoff_idx]] = True
        mask = mask.reshape(importance.shape)

        n_kept = mask.sum().item()
        n_total = mask.numel()
        actual_energy = flat_importance[mask.flatten()].sum().item()

        stats = {
            "n_kept": n_kept,
            "n_total": n_total,
            "fraction_kept": n_kept / n_total,
            "energy_retained": actual_energy / total_energy.item() if total_energy > 0 else 0,
            "threshold": threshold,
        }

        return mask, stats

    def reconstruct_weights(
        self,
        C: Tensor,
        mask: Tensor,
        U_A: Tensor,
        U_G: Tensor,
    ) -> Tensor:
        """
        Reconstruct weights from masked curvature components.

        W_edited = U_G @ (C ⊙ M) @ U_A^T
        """
        C_masked = C * mask.float()
        return U_G @ C_masked @ U_A.T

    def _resolve_projection(self, layer_name: str) -> nn.Module:
        """Resolve a "layer_X.proj_name" identifier to its projection module.

        Shared by the weight getter and setter so the model-structure
        navigation lives in exactly one place.

        Raises:
            ValueError: If the model's layer stack or the layer's MLP
                cannot be located.
        """
        # Parse layer name (format: "layer_X.proj_name").
        parts = layer_name.split(".")
        layer_idx = int(parts[0].replace("layer_", ""))
        proj_name = parts[1]

        # Navigate model structure across the common layouts
        # (HF `model.layers`, `transformer.blocks`, or a flat `layers`).
        if hasattr(self.model, "model") and hasattr(self.model.model, "layers"):
            layers = self.model.model.layers
        elif hasattr(self.model, "transformer") and hasattr(self.model.transformer, "blocks"):
            layers = self.model.transformer.blocks
        elif hasattr(self.model, "layers"):
            layers = self.model.layers
        else:
            raise ValueError("Could not find layers in model")

        layer = layers[layer_idx]

        # Find the MLP submodule under its common attribute names.
        if hasattr(layer, "mlp"):
            mlp = layer.mlp
        elif hasattr(layer, "feed_forward"):
            mlp = layer.feed_forward
        elif hasattr(layer, "ff"):
            mlp = layer.ff
        else:
            raise ValueError(f"Could not find MLP in layer {layer_idx}")

        return getattr(mlp, proj_name)

    def _get_weight_matrix(self, layer_name: str) -> Tensor:
        """Get the weight matrix for a given layer name."""
        return self._resolve_projection(layer_name).weight

    def _set_weight_matrix(self, layer_name: str, new_weight: Tensor) -> None:
        """Set the weight matrix for a given layer name (bypasses autograd)."""
        self._resolve_projection(layer_name).weight.data = new_weight

    def edit_layer(
        self,
        layer_name: str,
        energy_threshold: Optional[float] = None,
        formula: Optional[str] = None,
    ) -> dict:
        """
        Apply K-FAC editing to a single layer.

        Args:
            layer_name: Name of the layer to edit (e.g., "layer_11.gate_proj")
            energy_threshold: Override config threshold (0.0 is a valid value)
            formula: Override config formula

        Returns:
            Statistics about the edit

        Raises:
            ValueError: If no K-FAC statistics exist for ``layer_name`` or
                the formula is unknown.
        """
        # Explicit None checks so falsy-but-valid overrides (e.g. a 0.0
        # threshold) are not silently replaced by the config defaults.
        threshold = self.config.energy_threshold if energy_threshold is None else energy_threshold
        edit_formula = self.config.formula if formula is None else formula

        if layer_name not in self.kfac_stats:
            raise ValueError(f"No K-FAC statistics for layer {layer_name}")

        A, G = self.kfac_stats[layer_name]
        # Keep A and G on CPU — eigendecompose uses CPU for compatibility.
        A = A.to(device="cpu", dtype=torch.float32)
        G = G.to(device="cpu", dtype=torch.float32)

        # Get eigendecomposition (cached, all on CPU).
        if layer_name not in self._eigen_cache:
            self._eigen_cache[layer_name] = self.eigendecompose(A, G)

        eigen = self._eigen_cache[layer_name]
        lambda_A = eigen["lambda_A"]
        lambda_G = eigen["lambda_G"]
        U_A = eigen["U_A"]
        U_G = eigen["U_G"]

        # Pull current weights to CPU/float32 for the basis transforms and
        # remember their original placement for write-back.
        W_original = self._get_weight_matrix(layer_name)
        original_device = W_original.device
        original_dtype = W_original.dtype
        W = W_original.to(device="cpu", dtype=torch.float32)
        original_norm = torch.norm(W).item()

        # Transform to curvature basis (all on CPU).
        C = self.transform_to_curvature_basis(W, U_A, U_G)

        # Score components with the requested formula.
        if edit_formula == "original":
            importance = self.compute_importance_original(lambda_A, lambda_G)
        elif edit_formula == "modified":
            importance = self.compute_importance_modified(lambda_A, lambda_G, C)
        else:
            raise ValueError(f"Unknown formula: {edit_formula}")

        # Keep only the top components by cumulative energy.
        mask, mask_stats = self.compute_energy_mask(importance, threshold)

        # Reconstruct in the original basis (on CPU).
        W_edited = self.reconstruct_weights(C, mask, U_A, U_G)
        edited_norm = torch.norm(W_edited).item()

        # Move back to original device/dtype.
        W_edited = W_edited.to(device=original_device, dtype=original_dtype)

        # Apply edit (skipped when config.inplace is False).
        if self.config.inplace:
            self._set_weight_matrix(layer_name, W_edited)

        stats = {
            "layer_name": layer_name,
            "formula": edit_formula,
            "threshold": threshold,
            "original_norm": original_norm,
            "edited_norm": edited_norm,
            "norm_change": (edited_norm - original_norm) / original_norm if original_norm > 0 else 0,
            **mask_stats,
        }

        self.edit_stats[layer_name] = stats
        return stats

    def edit_model(
        self,
        layers: Optional[list[str]] = None,
        energy_threshold: Optional[float] = None,
        formula: Optional[str] = None,
        verbose: bool = True,
    ) -> dict:
        """
        Apply K-FAC editing to multiple layers.

        Args:
            layers: List of layer names to edit (default: all layers that
                have K-FAC statistics)
            energy_threshold: Override config threshold
            formula: Override config formula
            verbose: Print progress

        Returns:
            Summary statistics (averages across layers plus the per-layer
            stats under "layers")
        """
        if layers is None:
            layers = list(self.kfac_stats.keys())

        # Explicit None checks, mirroring edit_layer: a 0.0 threshold
        # override must not fall back to the config default in the log line.
        resolved_formula = self.config.formula if formula is None else formula
        resolved_threshold = self.config.energy_threshold if energy_threshold is None else energy_threshold

        if verbose:
            print(f"Editing {len(layers)} layers with {resolved_formula} formula, "
                  f"{resolved_threshold*100:.0f}% energy threshold")

        all_stats = []
        for layer_name in layers:
            stats = self.edit_layer(layer_name, energy_threshold, formula)
            all_stats.append(stats)

            if verbose:
                print(f"  {layer_name}: kept {stats['fraction_kept']*100:.1f}% components, "
                      f"energy {stats['energy_retained']*100:.1f}%, "
                      f"norm change {stats['norm_change']*100:+.1f}%")

        summary = {
            "n_layers_edited": len(all_stats),
            "avg_fraction_kept": np.mean([s["fraction_kept"] for s in all_stats]),
            "avg_energy_retained": np.mean([s["energy_retained"] for s in all_stats]),
            "avg_norm_change": np.mean([s["norm_change"] for s in all_stats]),
            "layers": all_stats,
        }

        return summary

    def restore_original(self, layer_name: str) -> None:
        """
        Restore original weights for a layer.

        Note: This only works if we kept a copy of the original weights,
        which we don't by default. This method would need to be called
        before editing if restoration is desired.

        Raises:
            NotImplementedError: Always; reload the model instead.
        """
        raise NotImplementedError(
            "Original weight restoration not implemented. "
            "Reload the model to restore original weights."
        )
461
+
462
+
463
def compare_formulas(
    model: nn.Module,
    kfac_stats: dict[str, tuple[Tensor, Tensor]],
    test_fn,
    layers: Optional[list[str]] = None,
    energy_thresholds: tuple[float, ...] = (0.5, 0.6, 0.7, 0.8),
    device: str = "cuda",
) -> dict:
    """
    Compare original and modified formulas across different thresholds.

    Args:
        model: Model to edit (temporarily modified; restored before return)
        kfac_stats: K-FAC statistics
        test_fn: Function that takes model and returns metrics dict
        layers: Layers to edit
        energy_thresholds: Thresholds to test (immutable tuple default to
            avoid the shared-mutable-default pitfall)
        device: Device for computation

    Returns:
        Results dictionary with "baseline" metrics and per-formula,
        per-threshold metrics under "original" and "modified"
    """
    import copy

    results = {
        "baseline": None,
        "original": {},
        "modified": {},
    }

    # Baseline (no editing)
    results["baseline"] = test_fn(model)
    print(f"Baseline: {results['baseline']}")

    # Snapshot the pristine weights so every configuration starts from the
    # same state.
    original_state = copy.deepcopy(model.state_dict())

    try:
        for formula in ["original", "modified"]:
            for threshold in energy_thresholds:
                # Restore original weights before each configuration.
                model.load_state_dict(original_state)

                # Apply edit
                config = EditConfig(
                    energy_threshold=threshold,
                    formula=formula,
                    device=device,
                )
                editor = KFACEditor(model, kfac_stats, config)
                edit_stats = editor.edit_model(layers, verbose=False)

                # Test
                metrics = test_fn(model)
                metrics["edit_stats"] = edit_stats

                results[formula][threshold] = metrics
                print(f"{formula} @ {threshold*100:.0f}%: {metrics}")
    finally:
        # Always hand the model back in its original state, even if an
        # edit or test_fn raises part-way through the sweep.
        model.load_state_dict(original_state)

    return results