raayraay committed on
Commit
998155c
·
verified ·
1 Parent(s): 8219fe5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -35,7 +35,7 @@ def farms_spectral_analysis(tensor, num_patches=10, patch_size=64):
35
  try:
36
  # We only need top components to find the "Universal Subspace"
37
  u, s, vh = np.linalg.svd(patch.float().numpy(), full_matrices=False)
38
- u_list.append(u[:, :1]) # Keep top principal direction
39
  except:
40
  continue
41
 
@@ -46,8 +46,8 @@ def farms_spectral_analysis(tensor, num_patches=10, patch_size=64):
46
  # Fallback to full SVD for the merging step, but using the "Universal" concept
47
  # We posit the top k singular vectors form the shared subspace.
48
  try:
49
- u, s, v = torch.svd_lowrank(tensor.float(), q=32) # Efficient randomized SVD
50
- return u, v # Returns Left (U) and Right (V) singular vectors
51
  except:
52
  return None, None
53
 
@@ -72,9 +72,9 @@ def farms_spectral_analysis(tensor, num_patches=10, patch_size=64):
72
  if not layer_tensors:
73
  continue
74
 
75
- # Stack for analysis
76
- # Shape: (N_adapters, rows, cols)
77
- stack = torch.stack(layer_tensors)
78
  avg_weight = torch.mean(stack, dim=0)
79
 
80
  # 2. IF it's a LoRA weight (usually 'lora_A' or 'lora_B'), we try SAMM
 
35
  try:
36
  # We only need top components to find the "Universal Subspace"
37
  u, s, vh = np.linalg.svd(patch.float().numpy(), full_matrices=False)
38
+ u_list.append(u[:, :1]) # Keep top principal direction
39
  except:
40
  continue
41
 
 
46
  # Fallback to full SVD for the merging step, but using the "Universal" concept
47
  # We posit the top k singular vectors form the shared subspace.
48
  try:
49
+ u, s, v = torch.svd_lowrank(tensor.float(), q=32) # Efficient randomized SVD
50
+ return u, v # Returns Left (U) and Right (V) singular vectors
51
  except:
52
  return None, None
53
 
 
72
  if not layer_tensors:
73
  continue
74
 
75
+ # Stack for analysis
76
+ # Shape: (N_adapters, rows, cols)
77
+ stack = torch.stack(layer_tensors)
78
  avg_weight = torch.mean(stack, dim=0)
79
 
80
  # 2. IF it's a LoRA weight (usually 'lora_A' or 'lora_B'), we try SAMM