phoebeklett committed on
Commit 10463cf · 1 Parent(s): ac9e251

Delete attention.py

Files changed (1)
  1. attention.py +0 -771
attention.py DELETED
@@ -1,771 +0,0 @@
- # Adapted from https://github.com/mosaicml/llm-foundry
- # Classes changed: MultiheadAttention
- # Functions changed: scaled_multihead_dot_product_attention, build_alibi_bias, build_attn_bias
- # SPDX-License-Identifier: Apache-2.0
-
- """Attention layers."""
- import math
- import warnings
- from typing import Optional
- import torch
- import torch.nn as nn
- from einops import rearrange
- from packaging import version
- from torch import nn
- from torch.linalg import vector_norm
- from llmfoundry.models.layers.norm import LPLayerNorm
- from torch.nn import functional as F
-
- def _reset_is_causal(num_query_tokens: int, num_key_tokens: int,
-                      original_is_causal: bool):
-     # disable causal when it is not needed
-     # necessary for flash & triton for generation with kv_cache
-     if original_is_causal and num_query_tokens != num_key_tokens:
-         if num_query_tokens != 1:
-             raise NotImplementedError(
-                 'MPT does not support query and key with different number of tokens, unless number of query tokens is 1.'
-             )
-         else:
-             return False
-     return original_is_causal
-
-
- def scaled_multihead_dot_product_attention(
-     query,
-     key,
-     value,
-     n_heads,
-     past_key_value=None,
-     long_range_past_key_value=None,
-     softmax_scale=None,
-     attn_bias=None,
-     attn_bias_ae=None,
-     key_padding_mask=None,
-     is_causal=False,
-     dropout_p=0.0,
-     training=False,
-     needs_weights=False,
-     multiquery=False,
-     topk=None,
-     faiss_indexes=None,
-     n_layers=None,
-     current_layer=None,
-     mask_by_sim=False,
-     sim_threshold=0.0
- ):
-     q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
-     kv_n_heads = 1 if multiquery else n_heads
-     k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
-     v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
-
-     had_kv=False
-     if past_key_value is not None:
-         # attn_impl: flash & triton use kernels which expect input shape [b, s, h, d_head].
-         # kv_cache is therefore stored using that shape.
-         # attn_impl: torch stores the kv_cache in the ordering which is most advantageous
-         # for its attn computation ie
-         # keys are stored as tensors with shape [b, h, d_head, s] and
-         # values are stored as tensors with shape [b, h, s, d_head]
-         if len(past_key_value) != 0:
-             k = torch.cat([past_key_value[0], k], dim=3)
-             v = torch.cat([past_key_value[1], v], dim=2)
-             had_kv=True
-
-         past_key_value = (k, v)
-
-     b, h, s_q, d = q.shape
-     s_k = k.size(-1)
-
-     if softmax_scale is None:
-         softmax_scale = 1 / math.sqrt(d)
-
-     attn_weight = q.matmul(k) * softmax_scale
-
-     if attn_bias is not None:
-         # clamp to 0 necessary for torch 2.0 compile()
-         _s_q = max(0, attn_bias.size(2) - s_q)
-         _s_k = max(0, attn_bias.size(3) - s_k)
-         attn_bias = attn_bias[:, :, _s_q:, _s_k:]
-
-         if (attn_bias.size(-1) != 1 and
-                 attn_bias.size(-1) != s_k) or (attn_bias.size(-2) != 1 and
-                                                attn_bias.size(-2) != s_q):
-             raise RuntimeError(
-                 f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.'
-             )
-         attn_weight = attn_weight + attn_bias
-
-     if needs_weights:
-         reshaped_idx = None
-     if long_range_past_key_value is not None or faiss_indexes is not None:
-         if long_range_past_key_value is not None: #manual external memories
-
-             k_cache, v_cache = long_range_past_key_value
-             s_cache = k_cache.size(-1)
-
-             k_cache = k_cache.to(k.device)
-             v_cache = v_cache.to(k.device)
-
-             q_n = q/vector_norm(q, ord=2, dim=-1, keepdim=True)
-             k_n = k_cache/vector_norm(k_cache, ord=2, dim=-2, keepdim=True)
-
-             sim = q_n.matmul(k_n)
-             if s_cache<topk:
-                 topk = s_cache #number of tokens in cache < topk
-             val, idx = torch.topk(sim, k=topk, dim=-1)
-
-             reshaped_idx = idx.reshape(b, h, s_q * topk)
-
-             selected_k = k_cache.gather(dim=-1, index=reshaped_idx.unsqueeze(-2).expand(-1, -1, d, -1))
-             selected_v = v_cache.gather(dim=-2, index=reshaped_idx.unsqueeze(-1).expand(-1, -1, -1, d))
-
-             sim_mask = rearrange(~ (val > sim_threshold).bool(), 'b h s i -> b h (s i)').unsqueeze(-2).expand(-1, -1, s_q, -1)
-             min_val = torch.finfo(selected_k.dtype).min
-
-         elif faiss_indexes is not None: #faiss indexes
-
-             kn_index, kv_index = faiss_indexes
-             q_n = q/vector_norm(q, ord=2, dim=-1, keepdim=True)
-
-             one_hot_encodings = F.one_hot(torch.arange(0, n_heads*n_layers, device=q.device))*10
-             q_n = torch.concat([rearrange(q_n, 'b h s d -> b (h s) d', h=n_heads), one_hot_encodings[n_heads*current_layer:n_heads*(current_layer+1)].unsqueeze(0).repeat_interleave(repeats=q.size(-2), dim=-2)], dim=-1).squeeze()
-
-             D, I = kn_index.search(q_n.to('cpu').numpy(), k=topk)
-
-             selected_k=rearrange(torch.tensor(kv_index.reconstruct_batch(I.flatten()))[:,:d], '(h s) d -> 1 h d s', h=32).to(q.device)
-             selected_v=rearrange(torch.tensor(kv_index.reconstruct_batch(I.flatten()))[:,d:], '(h s) d -> 1 h s d', h=32).to(q.device)
-
-         s_k_ae = selected_k.size(-1)
-         s_k += s_k_ae
-         attn_weight_cache = q.matmul(selected_k) * softmax_scale
-         if mask_by_sim:
-             attn_weight_cache = attn_weight_cache.masked_fill(sim_mask, min_val)
-
-         if attn_bias_ae is not None:
-             # clamp to 0 necessary for torch 2.0 compile()
-             _s_q = max(0, attn_bias_ae.size(2) - s_q)
-             _s_k = max(0, attn_bias_ae.size(3) - s_k_ae)
-             attn_bias_ae = attn_bias_ae[:, :, _s_q:, _s_k:]
-
-             if (attn_bias_ae.size(-1) != 1 and
-                     attn_bias_ae.size(-1) != s_k_ae) or (attn_bias_ae.size(-2) != 1 and
-                                                          attn_bias_ae.size(-2) != s_q):
-                 raise RuntimeError(
-                     f'attn_bias (shape: {attn_bias_ae.shape}) is expected to broadcast to shape: {attn_weight_cache.shape}.'
-                 )
-             attn_weight_cache = attn_weight_cache + attn_bias_ae
-
-         attn_weight = torch.cat([attn_weight_cache, attn_weight], dim=-1)
-         v = torch.cat([selected_v, v], dim=-2)
-
-     min_val = torch.finfo(q.dtype).min
-
-     if key_padding_mask is not None:
-         if attn_bias is not None:
-             warnings.warn(
-                 'Propagating key_padding_mask to the attention module ' +\
-                 'and applying it within the attention module can cause ' +\
-                 'unnecessary computation/memory usage. Consider integrating ' +\
-                 'into attn_bias once and passing that to each attention ' +\
-                 'module instead.'
-             )
-         attn_weight = attn_weight.masked_fill(
-             ~key_padding_mask.view((b, 1, 1, s_k)), min_val)
-
-     def _create_active_externalism_mask(k, s_q, device):
-         mask = torch.zeros(s_q, s_q * k, device=device, dtype=torch.bool)
-         for i in range(s_q):
-             mask[i, i * k : (i + 1) * k] = 1
-         return ~mask
-
-     if is_causal and (not q.size(2) == 1):
-         s = max(s_q, s_k)
-         causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
-         causal_mask = causal_mask.tril()
-         causal_mask = causal_mask.to(torch.bool)
-         causal_mask = ~causal_mask
-         causal_mask = causal_mask[-s_q:, -s_k:]
-
-         if long_range_past_key_value is not None:
-             mask = _create_active_externalism_mask(k=topk,s_q=s_q, device=attn_weight.device)
-             s=s_q
-             if had_kv:
-                 s += (past_key_value[0][0].size(-1) -s_q)
-             causal_mask = torch.cat([mask, causal_mask[:,-s:]], dim=1)
-
-         attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k),
-                                               min_val)
-
-     attn_weight = torch.softmax(attn_weight, dim=-1)
-
-     if dropout_p:
-         attn_weight = torch.nn.functional.dropout(attn_weight,
-                                                   p=dropout_p,
-                                                   training=training,
-                                                   inplace=True)
-
-     out = attn_weight.to(v.dtype).matmul(v)
-     out = rearrange(out, 'b h s d -> b s (h d)')
-
-     if needs_weights:
-         return out, attn_weight, past_key_value, reshaped_idx
-     return out, None, past_key_value, None
-
-
- def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
-     for tensor in tensors:
-         if tensor.dtype not in valid_dtypes:
-             raise TypeError(f'{tensor.dtype=} must be in {valid_dtypes=}.')
-         if not tensor.is_cuda:
-             raise TypeError(f'Inputs must be cuda tensors ({tensor.is_cuda=}).')
-
-
- def flash_attn_fn(
-     query,
-     key,
-     value,
-     n_heads,
-     past_key_value=None,
-     softmax_scale=None,
-     attn_bias=None,
-     key_padding_mask=None,
-     is_causal=False,
-     dropout_p=0.0,
-     training=False,
-     needs_weights=False,
-     multiquery=False,
- ):
-     try:
-         from flash_attn import bert_padding, flash_attn_interface # type: ignore # yapf: disable # isort: skip
-     except:
-         raise RuntimeError('Please install flash-attn==1.0.3.post0')
-
-     check_valid_inputs(query, key, value)
-
-     if past_key_value is not None:
-         if len(past_key_value) != 0:
-             key = torch.cat([past_key_value[0], key], dim=1)
-             value = torch.cat([past_key_value[1], value], dim=1)
-
-         past_key_value = (key, value)
-
-     if attn_bias is not None:
-         # clamp to 0 necessary for torch 2.0 compile()
-         _s_q = max(0, attn_bias.size(2) - query.size(1))
-         _s_k = max(0, attn_bias.size(3) - key.size(1))
-         attn_bias = attn_bias[:, :, _s_q:, _s_k:]
-
-     if attn_bias is not None:
-         raise NotImplementedError(f'attn_bias not implemented for flash attn.')
-
-     batch_size, seqlen = query.shape[:2]
-
-     if key_padding_mask is None:
-         key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
-     query_padding_mask = key_padding_mask[:, -query.size(1):]
-
-     query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(
-         query, query_padding_mask)
-     query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
-
-     key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(
-         key, key_padding_mask)
-     key_unpad = rearrange(key_unpad,
-                           'nnz (h d) -> nnz h d',
-                           h=1 if multiquery else n_heads)
-
-     value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)
-     value_unpad = rearrange(value_unpad,
-                             'nnz (h d) -> nnz h d',
-                             h=1 if multiquery else n_heads)
-
-     if multiquery:
-         # Expanding a tensor does not allocate new memory, but only creates a new
-         # view on the existing tensor where a dimension of size one is expanded
-         # to a larger size by setting the stride to 0.
-         # - pytorch docs
-         #
-         # hopefully the kernels can utilize this and we're not just wasting BW here
-         key_unpad = key_unpad.expand(key_unpad.size(0), n_heads,
-                                      key_unpad.size(-1))
-         value_unpad = value_unpad.expand(value_unpad.size(0), n_heads,
-                                          value_unpad.size(-1))
-
-     dropout_p = dropout_p if training else 0.0
-
-     reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
-
-     output_unpad = flash_attn_interface.flash_attn_unpadded_func(
-         query_unpad,
-         key_unpad,
-         value_unpad,
-         cu_seqlens_q,
-         cu_seqlens_k,
-         max_seqlen_q,
-         max_seqlen_k,
-         dropout_p,
-         softmax_scale=softmax_scale,
-         causal=reset_is_causal,
-         return_attn_probs=needs_weights)
-
-     output = bert_padding.pad_input(
-         rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size,
-         seqlen)
-     return output, None, past_key_value
-
-
- def triton_flash_attn_fn(
-     query,
-     key,
-     value,
-     n_heads,
-     past_key_value=None,
-     softmax_scale=None,
-     attn_bias=None,
-     key_padding_mask=None,
-     is_causal=False,
-     dropout_p=0.0,
-     training=False,
-     needs_weights=False,
-     multiquery=False,
- ):
-     try:
-         from llmfoundry.models.layers.flash_attn_triton import flash_attn_func
-     except:
-         _installed = False
-         if version.parse(torch.__version__) < version.parse('2.0.0'):
-             _installed = True
-             # if torch1.13.1 revert to using triton flash attn from HazyResearch
-             # with flash-attn==1.0.3.post0 and triton==2.0.0.dev20221202
-             try:
-                 from flash_attn.flash_attn_triton import flash_attn_func
-             except:
-                 _installed = False
-         if not _installed:
-             # installing triton-pre-mlir works for both torch1.13.1 and torch2.0+
-             # default recommendation is to install this variant
-             raise RuntimeError(
-                 'Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU '
-                 'and `pip install .[gpu]` if installing from llm-foundry source or '
-                 '`pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` '
-                 'if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). '
-                 'Note: (1) requires you have CMake and PyTorch already installed.'
-             )
-
-     check_valid_inputs(query, key, value)
-
-     if past_key_value is not None:
-         if len(past_key_value) != 0:
-             key = torch.cat([past_key_value[0], key], dim=1)
-             value = torch.cat([past_key_value[1], value], dim=1)
-
-         past_key_value = (key, value)
-
-     if attn_bias is not None:
-         # clamp to 0 necessary for torch 2.0 compile()
-         _s_q = max(0, attn_bias.size(2) - query.size(1))
-         _s_k = max(0, attn_bias.size(3) - key.size(1))
-         attn_bias = attn_bias[:, :, _s_q:, _s_k:]
-
-     if dropout_p:
-         raise NotImplementedError(
-             f'Dropout not implemented for attn_impl: triton.')
-
-     if needs_weights:
-         raise NotImplementedError(
-             f'attn_impl: triton cannot return attn weights.')
-
-     if key_padding_mask is not None:
-         warnings.warn(
-             'Propagating key_padding_mask to the attention module ' +\
-             'and applying it within the attention module can cause ' +\
-             'unnecessary computation/memory usage. Consider integrating ' +\
-             'into attn_bias once and passing that to each attention ' +\
-             'module instead.'
-         )
-         b_size, s_k = key_padding_mask.shape[:2]
-
-         if attn_bias is None:
-             attn_bias = query.new_zeros(b_size, 1, 1, s_k)
-
-         attn_bias = attn_bias.masked_fill(
-             ~key_padding_mask.view((b_size, 1, 1, s_k)),
-             torch.finfo(query.dtype).min)
-
-     query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
-     key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
-     value = rearrange(value,
-                       'b s (h d) -> b s h d',
-                       h=1 if multiquery else n_heads)
-
-     if multiquery:
-         # Expanding a tensor does not allocate new memory, but only creates a new
-         # view on the existing tensor where a dimension of size one is expanded
-         # to a larger size by setting the stride to 0.
-         # - pytorch docs
-         #
-         # hopefully the kernels can utilize this and we're not just wasting BW here
-         key = key.expand(*key.shape[:2], n_heads, key.size(-1))
-         value = value.expand(*value.shape[:2], n_heads, value.size(-1))
-
-     reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
-     attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal,
-                                   softmax_scale)
-
-     output = attn_output.view(*attn_output.shape[:2], -1)
-
-     return output, None, past_key_value
-
-
- class MultiheadAttention(nn.Module):
-     """Multi-head self attention.
-
-     Using torch or triton attention implementation enables the user to also use
-     additive bias.
-     """
-
-     def __init__(
-         self,
-         d_model: int,
-         n_heads: int,
-         attn_impl: str = 'triton',
-         clip_qkv: Optional[float] = None,
-         qk_ln: bool = False,
-         softmax_scale: Optional[float] = None,
-         attn_pdrop: float = 0.0,
-         low_precision_layernorm: bool = False,
-         verbose: int = 0,
-         device: Optional[str] = None,
-     ):
-         super().__init__()
-
-         self.attn_impl = attn_impl
-         self.clip_qkv = clip_qkv
-         self.qk_ln = qk_ln
-
-         self.d_model = d_model
-         self.n_heads = n_heads
-         self.softmax_scale = softmax_scale
-         if self.softmax_scale is None:
-             self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
-         self.attn_dropout_p = attn_pdrop
-
-         self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
-         # for param init fn; enables shape based init of fused layers
-         fuse_splits = (d_model, 2 * d_model)
-         self.Wqkv._fused = (0, fuse_splits) # type: ignore
-
-         if self.qk_ln:
-             layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
-             self.q_ln = layernorm_class(self.d_model, device=device)
-             self.k_ln = layernorm_class(self.d_model, device=device)
-
-         if self.attn_impl == 'flash':
-             self.attn_fn = flash_attn_fn
-         elif self.attn_impl == 'triton':
-             self.attn_fn = triton_flash_attn_fn
-             if verbose:
-                 warnings.warn(
-                     'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
-                     'it uses more memory. When training larger models this can trigger ' +\
-                     'alloc retries which hurts performance. If encountered, we recommend ' +\
-                     'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
-                 )
-         elif self.attn_impl == 'torch':
-             self.attn_fn = scaled_multihead_dot_product_attention
-             if torch.cuda.is_available() and verbose:
-                 warnings.warn(
-                     'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
-                     '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
-                     'we recommend using `attn_impl: triton`.'
-                 )
-         else:
-             raise ValueError(f'{attn_impl=} is an invalid setting.')
-
-         self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
-         self.out_proj._is_residual = True # type: ignore
-
-     def forward(
-         self,
-         x,
-         past_key_value=None,
-         long_range_past_key_value=None,
-         attn_bias=None,
-         attn_bias_ae=None,
-         attention_mask=None,
-         is_causal=True,
-         needs_weights=False,
-         topk=None,
-         faiss_indexes=None,
-         n_layers=None,
-         current_layer=None,
-         mask_by_sim=None,
-         sim_threshold=None
-     ):
-         qkv = self.Wqkv(x)
-
-         if self.clip_qkv:
-             qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
-
-         query, key, value = qkv.chunk(3, dim=2)
-
-         key_padding_mask = attention_mask
-
-         if self.qk_ln:
-             # Applying layernorm to qk
-             dtype = query.dtype
-             query = self.q_ln(query).to(dtype)
-             key = self.k_ln(key).to(dtype)
-
-         context, attn_weights, past_key_value, reshaped_idx = self.attn_fn(
-             query,
-             key,
-             value,
-             self.n_heads,
-             past_key_value=past_key_value,
-             long_range_past_key_value=long_range_past_key_value,
-             softmax_scale=self.softmax_scale,
-             attn_bias=attn_bias,
-             attn_bias_ae=attn_bias_ae,
-             key_padding_mask=key_padding_mask,
-             is_causal=is_causal,
-             dropout_p=self.attn_dropout_p,
-             training=self.training,
-             needs_weights=needs_weights,
-             topk=topk,
-             faiss_indexes=faiss_indexes,
-             n_layers=n_layers,
-             current_layer=current_layer,
-             mask_by_sim=mask_by_sim,
-             sim_threshold=sim_threshold
-         )
-
-         return self.out_proj(context), attn_weights, past_key_value, reshaped_idx
-
-
- class MultiQueryAttention(nn.Module):
-     """Multi-Query self attention.
-
-     Using torch or triton attention implementation enables the user to also use
-     additive bias.
-     """
-
-     def __init__(
-         self,
-         d_model: int,
-         n_heads: int,
-         attn_impl: str = 'triton',
-         clip_qkv: Optional[float] = None,
-         qk_ln: bool = False,
-         softmax_scale: Optional[float] = None,
-         attn_pdrop: float = 0.0,
-         low_precision_layernorm: bool = False,
-         verbose: int = 0,
-         device: Optional[str] = None,
-     ):
-         super().__init__()
-
-         self.attn_impl = attn_impl
-         self.clip_qkv = clip_qkv
-         self.qk_ln = qk_ln
-
-         self.d_model = d_model
-         self.n_heads = n_heads
-         self.head_dim = d_model // n_heads
-         self.softmax_scale = softmax_scale
-         if self.softmax_scale is None:
-             self.softmax_scale = 1 / math.sqrt(self.head_dim)
-         self.attn_dropout_p = attn_pdrop
-
-         # NOTE: if we ever want to make attn TensorParallel, I'm pretty sure we'll
-         # want to split Wqkv into Wq and Wkv where Wq can be TensorParallel but
-         # Wkv shouldn't be TensorParallel
-         # - vchiley
-         self.Wqkv = nn.Linear(
-             d_model,
-             d_model + 2 * self.head_dim,
-             device=device,
-         )
-         # for param init fn; enables shape based init of fused layers
-         fuse_splits = (d_model, d_model + self.head_dim)
-         self.Wqkv._fused = (0, fuse_splits) # type: ignore
-
-         if self.qk_ln:
-             layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
-             self.q_ln = layernorm_class(d_model, device=device)
-             self.k_ln = layernorm_class(self.head_dim, device=device)
-
-         if self.attn_impl == 'flash':
-             self.attn_fn = flash_attn_fn
-         elif self.attn_impl == 'triton':
-             self.attn_fn = triton_flash_attn_fn
-             if verbose:
-                 warnings.warn(
-                     'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
-                     'it uses more memory. When training larger models this can trigger ' +\
-                     'alloc retries which hurts performance. If encountered, we recommend ' +\
-                     'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
-                 )
-         elif self.attn_impl == 'torch':
-             self.attn_fn = scaled_multihead_dot_product_attention
-             if torch.cuda.is_available() and verbose:
-                 warnings.warn(
-                     'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
-                     '`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
-                     'we recommend using `attn_impl: triton`.'
-                 )
-         else:
-             raise ValueError(f'{attn_impl=} is an invalid setting.')
-
-         self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
-         self.out_proj._is_residual = True # type: ignore
-
-     def forward(
-         self,
-         x,
-         past_key_value=None,
-         attn_bias=None,
-         attention_mask=None,
-         is_causal=True,
-         needs_weights=False,
-     ):
-         qkv = self.Wqkv(x)
-
-         if self.clip_qkv:
-             qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
-
-         query, key, value = qkv.split(
-             [self.d_model, self.head_dim, self.head_dim], dim=2)
-
-         key_padding_mask = attention_mask
-
-         if self.qk_ln:
-             # Applying layernorm to qk
-             dtype = query.dtype
-             query = self.q_ln(query).to(dtype)
-             key = self.k_ln(key).to(dtype)
-
-         context, attn_weights, past_key_value = self.attn_fn(
-             query,
-             key,
-             value,
-             self.n_heads,
-             past_key_value=past_key_value,
-             softmax_scale=self.softmax_scale,
-             attn_bias=attn_bias,
-             key_padding_mask=key_padding_mask,
-             is_causal=is_causal,
-             dropout_p=self.attn_dropout_p,
-             training=self.training,
-             needs_weights=needs_weights,
-             multiquery=True,
-         )
-
-         return self.out_proj(context), attn_weights, past_key_value
-
-
- def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal,
-                     use_sequence_id):
-     if attn_impl == 'flash':
-         return None
-     elif attn_impl in ['torch', 'triton']:
-         if alibi:
-             if (prefix_lm or not causal) or use_sequence_id:
-                 return (1, n_heads, seq_len, seq_len)
-             return (1, n_heads, 1, seq_len)
-         elif prefix_lm or use_sequence_id:
-             return (1, 1, seq_len, seq_len)
-         return None
-     else:
-         raise ValueError(f'{attn_impl=} is an invalid setting.')
-
-
- def build_attn_bias(
-     attn_impl,
-     n_heads,
-     seq_len,
-     attn_bias=None,
-     causal=False,
-     alibi=False,
-     alibi_bias_max=8,
-     for_ae=False,
-     topk=0,
-     device=None,
-     dtype=None
- ):
-     if attn_impl == 'flash':
-         return None
-     elif attn_impl in ['torch', 'triton']:
-         if alibi:
-             # in place add alibi to attn bias
-             if attn_bias is not None:
-                 attn_bias = attn_bias.add(
-                     build_alibi_bias(
-                         n_heads,
-                         seq_len,
-                         full=not causal,
-                         alibi_bias_max=alibi_bias_max,
-                         device=device,
-                         dtype=dtype,
-                         for_ae=for_ae,
-                         topk=topk
-                     ))
-             else:
-                 attn_bias = build_alibi_bias(
-                     n_heads,
-                     seq_len,
-                     full=not causal,
-                     alibi_bias_max=alibi_bias_max,
-                     for_ae=for_ae,
-                     topk=topk)
-         return attn_bias
-
-
- def gen_slopes(n_heads, alibi_bias_max=8, device=None):
-     _n_heads = 2**math.ceil(math.log2(n_heads))
-     m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
-     m = m.mul(alibi_bias_max / _n_heads)
-     slopes = (1. / torch.pow(2, m))
-
-     if _n_heads != n_heads:
-         # if n_heads is not a power of two,
-         # Huggingface and FasterTransformer calculate slopes normally,
-         # then return this strided concatenation of slopes
-         slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
-
-     return slopes.view(1, n_heads, 1, 1)
-
-
- def build_alibi_bias(
-     n_heads,
-     seq_len,
-     full=False,
-     alibi_bias_max=8,
-     device=None,
-     dtype=None,
-     for_ae=False,
-     topk=0
- ):
-     if not for_ae:
-         alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32,
-                                   device=device).view(1, 1, 1, seq_len)
-     else:
-         alibi_bias = torch.tensor(-seq_len, dtype=torch.int32,
-                                   device=device).repeat(seq_len*topk).view(1, 1, 1, seq_len*(topk))
-     if full:
-         # generate 1 x Heads x SeqLen x SeqLen alibi bias mask
-         # otherwise the mask is 1 x Heads x 1 x SeqLen (which is broadcast to the appropriate size)
-         alibi_bias = alibi_bias - torch.arange(
-             1 - seq_len, 1, dtype=torch.int32, device=device).view(
-                 1, 1, seq_len, 1)
-         alibi_bias = alibi_bias.abs().mul(-1)
-
-     slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
-     alibi_bias = alibi_bias * slopes
-     return alibi_bias.to(dtype=dtype)
-
766
-
767
-
768
- ATTN_CLASS_REGISTRY = {
769
- 'multihead_attention': MultiheadAttention,
770
- 'multiquery_attention': MultiQueryAttention,
771
- }
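
For context, a minimal usage sketch (not part of the deleted file) of how the MultiheadAttention class shown in the diff above could be instantiated with the pure-torch attention path. The model size, batch shape, and variable names are illustrative assumptions, and importing the deleted module itself requires llmfoundry to be installed (for LPLayerNorm).

import torch

# Hypothetical settings; attn_impl='torch' selects scaled_multihead_dot_product_attention.
attn = MultiheadAttention(d_model=512, n_heads=8, attn_impl='torch')
x = torch.randn(2, 16, 512)  # (batch, seq_len, d_model)
out, weights, past_kv, retrieved_idx = attn(x, is_causal=True, needs_weights=False)
print(out.shape)  # torch.Size([2, 16, 512])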