tefoteknik committed on
Commit
a878980
·
verified ·
1 Parent(s): c095d2e

Upload src/models/layers.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. src/models/layers.py +39 -18
src/models/layers.py CHANGED
@@ -45,8 +45,9 @@ class LinearAttention(nn.Module):
45
 
46
  # Feature Map: ELU + 1 (Standard)
47
  # Stability Fix: Normalize Q and K to keep dot products in check
48
- q = F.elu(q) + 1.0
49
- k = F.elu(k) + 1.0
 
50
 
51
  # Scale to prevent huge sums
52
  # Standard attention divides by sqrt(dk), here we do it to Q
@@ -73,8 +74,8 @@ class LinearAttention(nn.Module):
73
  # (B, L, H, E) * (B, L, H, E) -> (B, L, H)
74
  den = torch.einsum('blhe,blhe->blh', q, k_cumsum)
75
 
76
- # Stability Fix: Larger epsilon
77
- den = den.unsqueeze(-1) + 1e-5
78
 
79
  out = num / den
80
 
@@ -102,29 +103,49 @@ class SlidingWindowAttention(nn.Module):
102
 
103
  def forward(self, x: torch.Tensor) -> torch.Tensor:
104
  B, L, D = x.shape
 
 
 
105
 
106
- qkv = self.qkv(x).reshape(B, L, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
 
107
  q, k, v = qkv[0], qkv[1], qkv[2]
108
 
109
- # Construct Sliding Window Mask manually to avoid SDPA kernel issues with complex constraints
110
- # Or simply rely on PyTorch 2.0+ causal masking if strict window is hard
111
- # Stability Fix: Use a simpler causal mask + manual zeroing for window
 
 
 
 
 
 
 
 
 
112
 
113
- # Full Causal Mask
114
- mask = torch.ones(L, L, device=x.device, dtype=torch.bool).tril(0)
115
- # Window constraint: Keep only if i - j < window_size
116
- # i.e., j > i - window_size
117
- window_mask = torch.ones(L, L, device=x.device, dtype=torch.bool).tril(0).triu(-(self.window_size - 1))
118
 
119
- # Combine: We need a bias mask where False -> -inf
120
- # SDPA expects attn_mask to be float (0 or -inf) or bool (True=Masked/NotAllowed)
121
- # Let's use bool: True means "Don't Attend"
122
 
123
- final_mask = ~window_mask
 
 
124
 
125
- out = F.scaled_dot_product_attention(q, k, v, attn_mask=final_mask, is_causal=False)
 
126
 
 
 
 
 
 
 
 
127
  out = out.transpose(1, 2).reshape(B, L, D)
 
128
  return self.proj(out)
129
 
130
  class HybridBlock(nn.Module):
 
45
 
46
  # Feature Map: ELU + 1 (Standard)
47
  # Stability Fix: Normalize Q and K to keep dot products in check
48
+ # Stability Fix 2: Ensure strictly positive to avoid 0 denominator
49
+ q = F.elu(q) + 1.0 + 1e-4
50
+ k = F.elu(k) + 1.0 + 1e-4
51
 
52
  # Scale to prevent huge sums
53
  # Standard attention divides by sqrt(dk), here we do it to Q
 
74
  # (B, L, H, E) * (B, L, H, E) -> (B, L, H)
75
  den = torch.einsum('blhe,blhe->blh', q, k_cumsum)
76
 
77
+ # Stability Fix: Larger epsilon and absolute check
78
+ den = den.unsqueeze(-1) + 1e-4
79
 
80
  out = num / den
81
 
 
103
 
104
  def forward(self, x: torch.Tensor) -> torch.Tensor:
105
  B, L, D = x.shape
106
+ H = self.num_heads
107
+ E = self.head_dim
108
+ scale = 1.0 / (E ** 0.5)
109
 
110
+ # (B, L, 3, H, E) -> (3, B, H, L, E)
111
+ qkv = self.qkv(x).reshape(B, L, 3, H, E).permute(2, 0, 3, 1, 4)
112
  q, k, v = qkv[0], qkv[1], qkv[2]
113
 
114
+ # Manual Attention for Stability
115
+ # (B, H, L, E) @ (B, H, E, L) -> (B, H, L, L)
116
+ scores = torch.matmul(q, k.transpose(-2, -1)) * scale
117
+
118
+ # Construct Mask
119
+ # Window constraint: j > i - window_size => i - j < window_size
120
+ # Causal: j <= i
121
+ # Valid: i - window_size < j <= i
122
+
123
+ # Mask is True where we want to BLOCK
124
+ # 1. Causal Block: j > i (triu(1))
125
+ # 2. Window Block: j <= i - window_size (tril(-window_size))
126
 
127
+ ones = torch.ones(L, L, device=x.device, dtype=torch.bool)
128
+ causal_mask = ones.triu(1)
129
+ window_mask = ones.tril(-self.window_size)
 
 
130
 
131
+ mask = causal_mask | window_mask
 
 
132
 
133
+ # Apply Mask
134
+ # Use -1e4 instead of -inf for stability
135
+ scores = scores.masked_fill(mask, -1e4)
136
 
137
+ # Softmax
138
+ attn = F.softmax(scores, dim=-1)
139
 
140
+ # Dropout (if needed, but not in init args currently)
141
+ # attn = F.dropout(attn, p=0.1)
142
+
143
+ # (B, H, L, L) @ (B, H, L, E) -> (B, H, L, E)
144
+ out = torch.matmul(attn, v)
145
+
146
+ # (B, H, L, E) -> (B, L, H, E) -> (B, L, D)
147
  out = out.transpose(1, 2).reshape(B, L, D)
148
+
149
  return self.proj(out)
150
 
151
  class HybridBlock(nn.Module):