"""
K-FAC Weight Editor
Applies K-FAC-based weight editing to suppress memorization by
removing low-curvature weight components.
Based on: "From Memorization to Reasoning in the Spectrum of Loss Curvature"
"""
import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional, Literal
from dataclasses import dataclass
import numpy as np
@dataclass
class EditConfig:
"""Configuration for K-FAC weight editing."""
# Energy threshold: keep top k% of curvature mass
energy_threshold: float = 0.6
# Formula for computing importance
# 'original': Π_ij = λ_i * μ_j
# 'modified': Π_ij = λ_i * μ_j * |C_ij|²
formula: Literal["original", "modified"] = "original"
# Device for computation
device: str = "cuda"
# Whether to modify model in-place
inplace: bool = True
class KFACEditor:
"""
Edits model weights using K-FAC-based compression to suppress memorization.
The editing procedure:
1. Eigendecompose A and G matrices from K-FAC
2. Transform weights to curvature basis: C = U_G^T @ W @ U_A
3. Compute importance scores Π_ij for each component
4. Select top components by cumulative energy threshold
5. Reconstruct weights: W_edited = U_G @ (C ⊙ M) @ U_A^T
The key insight is that high-curvature components (high Π) correspond
to generalizing directions, while low-curvature components are used
for memorization.
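
    A minimal usage sketch (``stats`` maps layer names to (A, G) pairs;
    ``collect_kfac_stats`` is a hypothetical collector, not defined here):

        >>> stats = collect_kfac_stats(model, dataloader)  # doctest: +SKIP
        >>> editor = KFACEditor(model, stats, EditConfig(energy_threshold=0.6))  # doctest: +SKIP
        >>> summary = editor.edit_model()  # edits weights in-place  # doctest: +SKIP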
"""
def __init__(
self,
model: nn.Module,
kfac_stats: dict[str, tuple[Tensor, Tensor]],
config: Optional[EditConfig] = None,
):
"""
Initialize the editor.
Args:
model: The model to edit
kfac_stats: Dictionary mapping layer names to (A, G) tuples
config: Edit configuration
"""
self.model = model
self.kfac_stats = kfac_stats
self.config = config or EditConfig()
# Cache for eigendecompositions
self._eigen_cache: dict[str, dict] = {}
# Statistics about edits
self.edit_stats: dict[str, dict] = {}
def eigendecompose(
self,
A: Tensor,
G: Tensor,
regularization: float = 1e-6,
) -> dict:
"""
Compute eigendecomposition of K-FAC factors.
Args:
A: Activation covariance matrix (d_in x d_in)
G: Gradient covariance matrix (d_out x d_out)
regularization: Small value added to diagonal for numerical stability
Returns:
Dictionary with eigenvalues and eigenvectors for both A and G
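
        Sanity check on toy diagonal factors (hypothetical values; the
        regularization shifts each eigenvalue by ~1e-6, rounded away here):

            >>> e = editor.eigendecompose(torch.diag(torch.tensor([1., 2.])),
            ...                           torch.diag(torch.tensor([3., .5])))  # doctest: +SKIP
            >>> torch.round(e["lambda_A"])  # sorted descending  # doctest: +SKIP
            tensor([2., 1.])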
"""
        # Move to CPU (MPS doesn't support eigh) and use float32 for numerical
        # stability. Results stay on CPU; callers move edited weights back.
        A = A.to(device="cpu", dtype=torch.float32)
        G = G.to(device="cpu", dtype=torch.float32)
# Ensure symmetric (should already be, but floating point...)
A = (A + A.T) / 2
G = (G + G.T) / 2
# Add regularization
A = A + regularization * torch.eye(A.shape[0], device=A.device, dtype=A.dtype)
G = G + regularization * torch.eye(G.shape[0], device=G.device, dtype=G.dtype)
        # Eigendecomposition on CPU; eigh returns eigenvalues in ascending
        # order, so we re-sort descending below
lambda_A, U_A = torch.linalg.eigh(A)
lambda_G, U_G = torch.linalg.eigh(G)
# Sort descending
idx_A = torch.argsort(lambda_A, descending=True)
idx_G = torch.argsort(lambda_G, descending=True)
lambda_A = lambda_A[idx_A]
U_A = U_A[:, idx_A]
lambda_G = lambda_G[idx_G]
U_G = U_G[:, idx_G]
# Clamp negative eigenvalues (numerical issues)
lambda_A = torch.clamp(lambda_A, min=0)
lambda_G = torch.clamp(lambda_G, min=0)
return {
"lambda_A": lambda_A, # (d_in,) - μ in paper notation
"U_A": U_A, # (d_in, d_in)
"lambda_G": lambda_G, # (d_out,) - λ in paper notation
"U_G": U_G, # (d_out, d_out)
}
def transform_to_curvature_basis(
self,
W: Tensor,
U_A: Tensor,
U_G: Tensor,
) -> Tensor:
"""
Transform weights to curvature basis.
C = U_G^T @ W @ U_A
Each C_ij represents the component of W along the direction
defined by the i-th gradient eigenvector and j-th activation eigenvector.
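
        Shape-level sketch (hypothetical sizes): for W of shape
        (d_out, d_in) = (8, 4), U_G is (8, 8) and U_A is (4, 4), so
        C = U_G.T @ W @ U_A is again (8, 4).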
"""
return U_G.T @ W @ U_A
def compute_importance_original(
self,
lambda_A: Tensor,
lambda_G: Tensor,
) -> Tensor:
"""
Compute importance scores using the original formula.
Π_ij = λ_i * μ_j
This is the outer product of the eigenvalues.
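
        Concrete case (hypothetical eigenvalues):

            >>> torch.outer(torch.tensor([3., 1.]), torch.tensor([2., 1.]))
            tensor([[6., 3.],
                    [2., 1.]])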
"""
# lambda_G: (d_out,) -> λ_i
# lambda_A: (d_in,) -> μ_j
# Result: (d_out, d_in)
return torch.outer(lambda_G, lambda_A)
def compute_importance_modified(
self,
lambda_A: Tensor,
lambda_G: Tensor,
C: Tensor,
) -> Tensor:
"""
Compute importance scores using the modified formula.
Π_ij = λ_i * μ_j * |C_ij|²
This weights the curvature by the actual magnitude of the
weight component in that direction.
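
        For example, a direction pair with large eigenvalues but C_ij ≈ 0
        contributes almost nothing under this formula, whereas the original
        formula would still rank it highly.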
"""
# Base importance from eigenvalues
Pi_base = torch.outer(lambda_G, lambda_A)
# Weight by squared magnitude of transformed weights
return Pi_base * (C ** 2)
def compute_energy_mask(
self,
importance: Tensor,
threshold: float,
) -> tuple[Tensor, dict]:
"""
Compute binary mask keeping top components by cumulative energy.
Args:
importance: Importance scores (d_out, d_in)
threshold: Fraction of total energy to retain (e.g., 0.6 = 60%)
Returns:
Tuple of (mask, statistics)
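
        Worked example (hypothetical 2x2 scores): for importance
        [[4, 3], [2, 1]] with threshold 0.6, total energy is 10 and the
        target is 6; the descending cumulative sums are [4, 7, 9, 10], so
        the top two components are kept (fraction_kept = 0.5,
        energy_retained = 0.7).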
"""
# Flatten and sort
flat_importance = importance.flatten()
total_energy = flat_importance.sum()
# Sort descending
sorted_vals, sorted_indices = torch.sort(flat_importance, descending=True)
# Cumulative sum
cumsum = torch.cumsum(sorted_vals, dim=0)
# Find cutoff index
target_energy = threshold * total_energy
cutoff_idx = torch.searchsorted(cumsum, target_energy).item()
cutoff_idx = max(1, min(cutoff_idx + 1, len(flat_importance))) # At least 1, at most all
# Create mask
mask = torch.zeros_like(flat_importance, dtype=torch.bool)
mask[sorted_indices[:cutoff_idx]] = True
mask = mask.reshape(importance.shape)
# Compute statistics
n_kept = mask.sum().item()
n_total = mask.numel()
actual_energy = flat_importance[mask.flatten()].sum().item()
stats = {
"n_kept": n_kept,
"n_total": n_total,
"fraction_kept": n_kept / n_total,
"energy_retained": actual_energy / total_energy.item() if total_energy > 0 else 0,
"threshold": threshold,
}
return mask, stats
def reconstruct_weights(
self,
C: Tensor,
mask: Tensor,
U_A: Tensor,
U_G: Tensor,
) -> Tensor:
"""
Reconstruct weights from masked curvature components.
W_edited = U_G @ (C ⊙ M) @ U_A^T
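
        With an all-ones mask this inverts transform_to_curvature_basis
        exactly (U_G and U_A are orthogonal), recovering W up to
        floating-point error.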
"""
C_masked = C * mask.float()
return U_G @ C_masked @ U_A.T
    def _get_projection(self, layer_name: str) -> nn.Module:
        """Resolve a layer name (format: "layer_X.proj_name") to its projection module."""
        parts = layer_name.split(".")
        layer_idx = int(parts[0].replace("layer_", ""))
        proj_name = parts[1]
        # Navigate model structure (supports a few common layouts)
        if hasattr(self.model, "model") and hasattr(self.model.model, "layers"):
            layers = self.model.model.layers
        elif hasattr(self.model, "transformer") and hasattr(self.model.transformer, "blocks"):
            layers = self.model.transformer.blocks
        elif hasattr(self.model, "layers"):
            layers = self.model.layers
        else:
            raise ValueError("Could not find layers in model")
        layer = layers[layer_idx]
        # Find the MLP submodule
        mlp = None
        if hasattr(layer, "mlp"):
            mlp = layer.mlp
        elif hasattr(layer, "feed_forward"):
            mlp = layer.feed_forward
        elif hasattr(layer, "ff"):
            mlp = layer.ff
        if mlp is None:
            raise ValueError(f"Could not find MLP in layer {layer_idx}")
        return getattr(mlp, proj_name)

    def _get_weight_matrix(self, layer_name: str) -> Tensor:
        """Get the weight matrix for a given layer name."""
        return self._get_projection(layer_name).weight

    def _set_weight_matrix(self, layer_name: str, new_weight: Tensor) -> None:
        """Set the weight matrix for a given layer name."""
        proj = self._get_projection(layer_name)
        # Detach so the edited tensor carries no autograd history
        proj.weight.data = new_weight.detach()
def edit_layer(
self,
layer_name: str,
energy_threshold: Optional[float] = None,
formula: Optional[str] = None,
) -> dict:
"""
Apply K-FAC editing to a single layer.
Args:
layer_name: Name of the layer to edit (e.g., "layer_11.gate_proj")
energy_threshold: Override config threshold
formula: Override config formula
Returns:
Statistics about the edit
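
        Example (hypothetical layer name; its statistics must exist):

            >>> s = editor.edit_layer("layer_11.gate_proj", energy_threshold=0.7)  # doctest: +SKIP
            >>> s["fraction_kept"], s["norm_change"]  # doctest: +SKIP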
"""
        # Use `is not None` so an explicit 0.0 threshold is honored
        threshold = energy_threshold if energy_threshold is not None else self.config.energy_threshold
        edit_formula = formula if formula is not None else self.config.formula
if layer_name not in self.kfac_stats:
raise ValueError(f"No K-FAC statistics for layer {layer_name}")
A, G = self.kfac_stats[layer_name]
        # eigendecompose() moves A and G to CPU/float32 internally
# Get eigendecomposition (cached) - all on CPU
if layer_name not in self._eigen_cache:
self._eigen_cache[layer_name] = self.eigendecompose(A, G)
eigen = self._eigen_cache[layer_name]
lambda_A = eigen["lambda_A"]
lambda_G = eigen["lambda_G"]
U_A = eigen["U_A"]
U_G = eigen["U_G"]
# Get current weights - move to CPU for matrix operations
W_original = self._get_weight_matrix(layer_name)
original_device = W_original.device
original_dtype = W_original.dtype
W = W_original.to(device="cpu", dtype=torch.float32)
original_norm = torch.norm(W).item()
# Transform to curvature basis (all on CPU)
C = self.transform_to_curvature_basis(W, U_A, U_G)
# Compute importance
if edit_formula == "original":
importance = self.compute_importance_original(lambda_A, lambda_G)
elif edit_formula == "modified":
importance = self.compute_importance_modified(lambda_A, lambda_G, C)
else:
raise ValueError(f"Unknown formula: {edit_formula}")
# Get mask
mask, mask_stats = self.compute_energy_mask(importance, threshold)
# Reconstruct (on CPU)
W_edited = self.reconstruct_weights(C, mask, U_A, U_G)
edited_norm = torch.norm(W_edited).item()
# Move back to original device/dtype
W_edited = W_edited.to(device=original_device, dtype=original_dtype)
# Apply edit
if self.config.inplace:
self._set_weight_matrix(layer_name, W_edited)
# Compute statistics
stats = {
"layer_name": layer_name,
"formula": edit_formula,
"threshold": threshold,
"original_norm": original_norm,
"edited_norm": edited_norm,
"norm_change": (edited_norm - original_norm) / original_norm if original_norm > 0 else 0,
**mask_stats,
}
self.edit_stats[layer_name] = stats
return stats
def edit_model(
self,
layers: Optional[list[str]] = None,
energy_threshold: Optional[float] = None,
formula: Optional[str] = None,
verbose: bool = True,
) -> dict:
"""
Apply K-FAC editing to multiple layers.
Args:
layers: List of layer names to edit (default: all available)
energy_threshold: Override config threshold
formula: Override config formula
verbose: Print progress
Returns:
Summary statistics
"""
if layers is None:
layers = list(self.kfac_stats.keys())
        if verbose:
            shown_threshold = energy_threshold if energy_threshold is not None else self.config.energy_threshold
            print(f"Editing {len(layers)} layers with {formula or self.config.formula} formula, "
                  f"{shown_threshold * 100:.0f}% energy threshold")
all_stats = []
for layer_name in layers:
stats = self.edit_layer(layer_name, energy_threshold, formula)
all_stats.append(stats)
if verbose:
print(f" {layer_name}: kept {stats['fraction_kept']*100:.1f}% components, "
f"energy {stats['energy_retained']*100:.1f}%, "
f"norm change {stats['norm_change']*100:+.1f}%")
# Summary
summary = {
"n_layers_edited": len(all_stats),
"avg_fraction_kept": np.mean([s["fraction_kept"] for s in all_stats]),
"avg_energy_retained": np.mean([s["energy_retained"] for s in all_stats]),
"avg_norm_change": np.mean([s["norm_change"] for s in all_stats]),
"layers": all_stats,
}
return summary
def restore_original(self, layer_name: str) -> None:
"""
        Restore original weights for a layer.

        Not implemented: the editor does not keep a copy of the original
        weights, so there is nothing to restore from. Reload the model, or
        snapshot its state_dict before editing (as compare_formulas does).
"""
raise NotImplementedError(
"Original weight restoration not implemented. "
"Reload the model to restore original weights."
)
def compare_formulas(
model: nn.Module,
kfac_stats: dict[str, tuple[Tensor, Tensor]],
test_fn,
layers: Optional[list[str]] = None,
    energy_thresholds: tuple[float, ...] = (0.5, 0.6, 0.7, 0.8),
device: str = "cuda",
) -> dict:
"""
Compare original and modified formulas across different thresholds.
Args:
        model: Model to edit (modified in place during the sweep, then
            restored from a state_dict snapshot at the end)
        kfac_stats: K-FAC statistics
        test_fn: Function that takes the model and returns a metrics dict
        layers: Layers to edit
        energy_thresholds: Energy thresholds to test
device: Device for computation
Returns:
Results dictionary
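
    Example with a hypothetical ``test_fn`` (any callable taking the model
    and returning a metrics dict works; ``evaluate`` is not defined here):

        >>> def test_fn(m):
        ...     return {"loss": evaluate(m)}  # doctest: +SKIP
        >>> results = compare_formulas(model, stats, test_fn)  # doctest: +SKIP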
"""
import copy
results = {
"baseline": None,
"original": {},
"modified": {},
}
# Baseline (no editing)
results["baseline"] = test_fn(model)
print(f"Baseline: {results['baseline']}")
# Save original state
original_state = copy.deepcopy(model.state_dict())
for formula in ["original", "modified"]:
for threshold in energy_thresholds:
# Restore original weights
model.load_state_dict(original_state)
# Apply edit
config = EditConfig(
energy_threshold=threshold,
formula=formula,
device=device,
)
editor = KFACEditor(model, kfac_stats, config)
edit_stats = editor.edit_model(layers, verbose=False)
# Test
metrics = test_fn(model)
metrics["edit_stats"] = edit_stats
results[formula][threshold] = metrics
print(f"{formula} @ {threshold*100:.0f}%: {metrics}")
# Restore original
model.load_state_dict(original_state)
return results
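

# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (not part of the editor's API): builds a toy model
# matching the "layer_X.proj_name" convention used above, fabricates SPD
# K-FAC factors, and runs a single CPU edit. All names below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _ToyMLP(nn.Module):
        def __init__(self, d_in: int, d_out: int):
            super().__init__()
            self.gate_proj = nn.Linear(d_in, d_out, bias=False)

    class _ToyLayer(nn.Module):
        def __init__(self, d_in: int, d_out: int):
            super().__init__()
            self.mlp = _ToyMLP(d_in, d_out)

    class _ToyModel(nn.Module):
        def __init__(self, d_in: int = 16, d_out: int = 32):
            super().__init__()
            self.layers = nn.ModuleList([_ToyLayer(d_in, d_out)])

    torch.manual_seed(0)
    model = _ToyModel()
    # Random SPD matrices standing in for real activation/gradient covariances
    X, Y = torch.randn(64, 16), torch.randn(64, 32)
    stats = {"layer_0.gate_proj": (X.T @ X / 64, Y.T @ Y / 64)}
    editor = KFACEditor(model, stats, EditConfig(device="cpu"))
    print(editor.edit_layer("layer_0.gate_proj"))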