Update modeling_super_linear.py
Browse files- modeling_super_linear.py +0 -4
modeling_super_linear.py
CHANGED
|
@@ -205,11 +205,9 @@ class RLinear(nn.Module):
|
|
| 205 |
if mode == 1:
|
| 206 |
W = self.Linear.weight.detach()
|
| 207 |
new_W = W[:, -new_lookback:]
|
| 208 |
-
#new_W = W[:, :new_lookback]
|
| 209 |
original_norm = torch.norm(W, p=2)
|
| 210 |
new_norm = torch.norm(new_W, p=2)
|
| 211 |
final_scaling = original_norm / new_norm if new_norm.item() != 0 else 1.0
|
| 212 |
-
final_scaling = 1
|
| 213 |
new_W = new_W * final_scaling
|
| 214 |
|
| 215 |
self.zero_shot_Linear = new_W
|
|
@@ -240,11 +238,9 @@ class RLinear(nn.Module):
|
|
| 240 |
#print(F"new Lookback : {x.shape[1]}")
|
| 241 |
|
| 242 |
self.transform_model(x.shape[1],1)
|
| 243 |
-
x = x * (x.shape[1]/512)
|
| 244 |
x = self.revin_layer(x, 'norm')
|
| 245 |
x = F.linear(x, self.zero_shot_Linear)
|
| 246 |
x = self.revin_layer(x, 'denorm')
|
| 247 |
-
x = x * (512/x.shape[1])
|
| 248 |
return x
|
| 249 |
|
| 250 |
|
|
|
|
| 205 |
if mode == 1:
|
| 206 |
W = self.Linear.weight.detach()
|
| 207 |
new_W = W[:, -new_lookback:]
|
|
|
|
| 208 |
original_norm = torch.norm(W, p=2)
|
| 209 |
new_norm = torch.norm(new_W, p=2)
|
| 210 |
final_scaling = original_norm / new_norm if new_norm.item() != 0 else 1.0
|
|
|
|
| 211 |
new_W = new_W * final_scaling
|
| 212 |
|
| 213 |
self.zero_shot_Linear = new_W
|
|
|
|
| 238 |
#print(F"new Lookback : {x.shape[1]}")
|
| 239 |
|
| 240 |
self.transform_model(x.shape[1],1)
|
|
|
|
| 241 |
x = self.revin_layer(x, 'norm')
|
| 242 |
x = F.linear(x, self.zero_shot_Linear)
|
| 243 |
x = self.revin_layer(x, 'denorm')
|
|
|
|
| 244 |
return x
|
| 245 |
|
| 246 |
|