jordan0811 committed on
Commit e8121a6 · verified · 1 Parent(s): 5990989

Delete files resampler.py with huggingface_hub

Files changed (1)
  1. resampler.py +0 -782
resampler.py DELETED
@@ -1,782 +0,0 @@
1
- from functools import partial
2
- from typing import Optional, Tuple, List
3
- import numpy as np
4
- import warnings
5
-
6
- import torch
7
- from torch import nn
8
- from torch import Tensor
9
- import torch.nn.functional as F
10
- from torch.nn.functional import *
11
- from torch.nn.modules.activation import *
12
- from torch.nn.init import trunc_normal_, constant_, xavier_normal_, xavier_uniform_
13
-
14
- from transformers.integrations import is_deepspeed_zero3_enabled
15
-
16
- def get_2d_sincos_pos_embed(embed_dim, image_size):
17
- """
18
- image_size: image_size or (image_height, image_width)
19
- return:
20
- pos_embed: [image_height, image_width, embed_dim]
21
- """
22
- if isinstance(image_size, int):
23
- grid_h_size, grid_w_size = image_size, image_size
24
- else:
25
- grid_h_size, grid_w_size = image_size[0], image_size[1]
26
-
27
- grid_h = np.arange(grid_h_size, dtype=np.float32)
28
- grid_w = np.arange(grid_w_size, dtype=np.float32)
29
- grid = np.meshgrid(grid_w, grid_h) # here w goes first
30
- grid = np.stack(grid, axis=0)
31
-
32
- pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
33
- return pos_embed
34
-
35
-
36
- def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
37
- assert embed_dim % 2 == 0
38
-
39
- # use half of dimensions to encode grid_h
40
- emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0]) # (H, W, D/2)
41
- emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1]) # (H, W, D/2)
42
-
43
- emb = np.concatenate([emb_h, emb_w], axis=-1) # (H, W, D)
44
- return emb
45
-
46
-
47
- def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
48
- """
49
- embed_dim: output dimension for each position
50
- pos: a list of positions to be encoded: size (H, W)
51
- out: (H, W, D)
52
- """
53
- assert embed_dim % 2 == 0
54
- omega = np.arange(embed_dim // 2, dtype=np.float32)
55
- omega /= embed_dim / 2.
56
- omega = 1. / 10000 ** omega # (D/2,)
57
-
58
- out = np.einsum('hw,d->hwd', pos, omega) # (H, W, D/2), outer product
59
-
60
- emb_sin = np.sin(out) # (H, W, D/2)
61
- emb_cos = np.cos(out) # (H, W, D/2)
62
-
63
- emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (H, W, D)
64
- return emb
65
-
66
-
67
- class Resampler(nn.Module):
68
- """
69
- A 2D perceiver-resampler network with a single cross-attention layer,
70
- conditioned on learnable queries and a 2D sincos position embedding.
71
- Outputs:
72
- A tensor with the shape of (batch_size, num_queries, embed_dim)
73
- """
74
-
75
- def __init__(
76
- self,
77
- num_queries,
78
- embed_dim,
79
- num_heads,
80
- kv_dim=None,
81
- norm_layer=partial(nn.LayerNorm, eps=1e-6),
82
- adaptive=False,
83
- max_size=(70, 70),
84
- ):
85
- super().__init__()
86
- self.num_queries = num_queries
87
- self.embed_dim = embed_dim
88
- self.num_heads = num_heads
89
- self.adaptive = adaptive
90
- self.max_size = max_size
91
-
92
- self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
93
-
94
- if kv_dim is not None and kv_dim != embed_dim:
95
- self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
96
- else:
97
- self.kv_proj = nn.Identity()
98
-
99
- self.attn = MultiheadAttention(embed_dim, num_heads)
100
- self.ln_q = norm_layer(embed_dim)
101
- self.ln_kv = norm_layer(embed_dim)
102
-
103
- self.ln_post = norm_layer(embed_dim)
104
- self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
105
-
106
- self._set_2d_pos_cache(self.max_size)
107
-
108
- def _set_2d_pos_cache(self, max_size, device='cpu'):
109
- if is_deepspeed_zero3_enabled():
110
- device='cuda'
111
- pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
112
- self.register_buffer("pos_embed", pos_embed, persistent=False)
113
-
114
- def _adjust_pos_cache(self, tgt_sizes, device):
115
- max_h = torch.max(tgt_sizes[:, 0])
116
- max_w = torch.max(tgt_sizes[:, 1])
117
- if max_h > self.max_size[0] or max_w > self.max_size[1]:
118
- self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
119
- self._set_2d_pos_cache(self.max_size, device)
120
-
121
- def _initialize_weights(self, m):
122
- if isinstance(m, nn.Linear):
123
- trunc_normal_(m.weight, std=.02)
124
- if isinstance(m, nn.Linear) and m.bias is not None:
125
- nn.init.constant_(m.bias, 0)
126
- elif isinstance(m, nn.LayerNorm):
127
- nn.init.constant_(m.bias, 0)
128
- nn.init.constant_(m.weight, 1.0)
129
-
130
- def forward(self, x, tgt_sizes=None):
131
- assert x.shape[0] == tgt_sizes.shape[0]
132
- bs = x.shape[0]
133
-
134
- device = x.device
135
- dtype = x.dtype
136
-
137
- patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
138
-
139
- self._adjust_pos_cache(tgt_sizes, device=device)
140
-
141
- max_patch_len = torch.max(patch_len)
142
- key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
143
-
144
- pos_embed = []
145
- for i in range(bs):
146
- tgt_h, tgt_w = tgt_sizes[i]
147
- pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
148
- key_padding_mask[i, patch_len[i]:] = True
149
-
150
- pos_embed = torch.nn.utils.rnn.pad_sequence(
151
- pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2) # BLD => L * B * D
152
-
153
- x = self.kv_proj(x) # B * L * D
154
- x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
155
-
156
- q = self.ln_q(self.query) # Q * D
157
-
158
- out = self.attn(
159
- self._repeat(q, bs), # Q * B * D
160
- x + pos_embed, # L * B * D + L * B * D
161
- x,
162
- key_padding_mask=key_padding_mask)[0]
163
- # out: Q * B * D
164
- x = out.permute(1, 0, 2) # B * Q * D
165
-
166
- x = self.ln_post(x)
167
- x = x @ self.proj
168
- return x
169
-
170
- def _repeat(self, query, N: int):
171
- return query.unsqueeze(1).repeat(1, N, 1)
172
-
173
-
174
- class MultiheadAttention(nn.MultiheadAttention):
175
- def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
176
- add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
177
- super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
178
-
179
- # replace the parent's out_proj layer with a plain nn.Linear
180
- self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
181
-
182
- def forward(
183
- self,
184
- query: Tensor,
185
- key: Tensor,
186
- value: Tensor,
187
- key_padding_mask: Optional[Tensor] = None,
188
- need_weights: bool = True,
189
- attn_mask: Optional[Tensor] = None,
190
- average_attn_weights: bool = True,
191
- is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
192
- why_not_fast_path = ''
193
- if ((attn_mask is not None and torch.is_floating_point(attn_mask))
194
- or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
195
- why_not_fast_path = "floating-point masks are not supported for fast path."
196
-
197
- is_batched = query.dim() == 3
198
-
199
- key_padding_mask = _canonical_mask(
200
- mask=key_padding_mask,
201
- mask_name="key_padding_mask",
202
- other_type=_none_or_dtype(attn_mask),
203
- other_name="attn_mask",
204
- target_type=query.dtype
205
- )
206
-
207
- attn_mask = _canonical_mask(
208
- mask=attn_mask,
209
- mask_name="attn_mask",
210
- other_type=None,
211
- other_name="",
212
- target_type=query.dtype,
213
- check_other=False,
214
- )
215
-
216
-
217
- if not is_batched:
218
- why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
219
- elif query is not key or key is not value:
220
- # When lifting this restriction, don't forget to either
221
- # enforce that the dtypes all match or test cases where
222
- # they don't!
223
- why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
224
- elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
225
- why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
226
- elif self.in_proj_weight is None:
227
- why_not_fast_path = "in_proj_weight was None"
228
- elif query.dtype != self.in_proj_weight.dtype:
229
- # this case will fail anyway, but at least they'll get a useful error message.
230
- why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
231
- elif self.training:
232
- why_not_fast_path = "training is enabled"
233
- elif (self.num_heads % 2) != 0:
234
- why_not_fast_path = "self.num_heads is not even"
235
- elif not self.batch_first:
236
- why_not_fast_path = "batch_first was not True"
237
- elif self.bias_k is not None:
238
- why_not_fast_path = "self.bias_k was not None"
239
- elif self.bias_v is not None:
240
- why_not_fast_path = "self.bias_v was not None"
241
- elif self.add_zero_attn:
242
- why_not_fast_path = "add_zero_attn was enabled"
243
- elif not self._qkv_same_embed_dim:
244
- why_not_fast_path = "_qkv_same_embed_dim was not True"
245
- elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
246
- why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
247
- is not supported with NestedTensor input"
248
- elif torch.is_autocast_enabled():
249
- why_not_fast_path = "autocast is enabled"
250
-
251
- if not why_not_fast_path:
252
- tensor_args = (
253
- query,
254
- key,
255
- value,
256
- self.in_proj_weight,
257
- self.in_proj_bias,
258
- self.out_proj.weight,
259
- self.out_proj.bias,
260
- )
261
- # We have to use list comprehensions below because TorchScript does not support
262
- # generator expressions.
263
- if torch.overrides.has_torch_function(tensor_args):
264
- why_not_fast_path = "some Tensor argument has_torch_function"
265
- elif _is_make_fx_tracing():
266
- why_not_fast_path = "we are running make_fx tracing"
267
- elif not all(_check_arg_device(x) for x in tensor_args):
268
- why_not_fast_path = ("some Tensor argument's device is neither one of "
269
- f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
270
- elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
271
- why_not_fast_path = ("grad is enabled and at least one of query or the "
272
- "input/output projection weights or biases requires_grad")
273
- if not why_not_fast_path:
274
- merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
275
-
276
- if self.in_proj_bias is not None and self.in_proj_weight is not None:
277
- return torch._native_multi_head_attention(
278
- query,
279
- key,
280
- value,
281
- self.embed_dim,
282
- self.num_heads,
283
- self.in_proj_weight,
284
- self.in_proj_bias,
285
- self.out_proj.weight,
286
- self.out_proj.bias,
287
- merged_mask,
288
- need_weights,
289
- average_attn_weights,
290
- mask_type)
291
-
292
- any_nested = query.is_nested or key.is_nested or value.is_nested
293
- assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
294
- f"The fast path was not hit because {why_not_fast_path}")
295
-
296
- if self.batch_first and is_batched:
297
- # make sure that the transpose op does not affect the "is" property
298
- if key is value:
299
- if query is key:
300
- query = key = value = query.transpose(1, 0)
301
- else:
302
- query, key = (x.transpose(1, 0) for x in (query, key))
303
- value = key
304
- else:
305
- query, key, value = (x.transpose(1, 0) for x in (query, key, value))
306
-
307
- if not self._qkv_same_embed_dim:
308
- attn_output, attn_output_weights = self.multi_head_attention_forward(
309
- query, key, value, self.embed_dim, self.num_heads,
310
- self.in_proj_weight, self.in_proj_bias,
311
- self.bias_k, self.bias_v, self.add_zero_attn,
312
- self.dropout, self.out_proj.weight, self.out_proj.bias,
313
- training=self.training,
314
- key_padding_mask=key_padding_mask, need_weights=need_weights,
315
- attn_mask=attn_mask,
316
- use_separate_proj_weight=True,
317
- q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
318
- v_proj_weight=self.v_proj_weight,
319
- average_attn_weights=average_attn_weights,
320
- is_causal=is_causal)
321
- else:
322
- attn_output, attn_output_weights = self.multi_head_attention_forward(
323
- query, key, value, self.embed_dim, self.num_heads,
324
- self.in_proj_weight, self.in_proj_bias,
325
- self.bias_k, self.bias_v, self.add_zero_attn,
326
- self.dropout, self.out_proj.weight, self.out_proj.bias,
327
- training=self.training,
328
- key_padding_mask=key_padding_mask,
329
- need_weights=need_weights,
330
- attn_mask=attn_mask,
331
- average_attn_weights=average_attn_weights,
332
- is_causal=is_causal)
333
- if self.batch_first and is_batched:
334
- return attn_output.transpose(1, 0), attn_output_weights
335
- else:
336
- return attn_output, attn_output_weights
337
-
338
- def multi_head_attention_forward(
339
- self,
340
- query: Tensor,
341
- key: Tensor,
342
- value: Tensor,
343
- embed_dim_to_check: int,
344
- num_heads: int,
345
- in_proj_weight: Optional[Tensor],
346
- in_proj_bias: Optional[Tensor],
347
- bias_k: Optional[Tensor],
348
- bias_v: Optional[Tensor],
349
- add_zero_attn: bool,
350
- dropout_p: float,
351
- out_proj_weight: Tensor,
352
- out_proj_bias: Optional[Tensor],
353
- training: bool = True,
354
- key_padding_mask: Optional[Tensor] = None,
355
- need_weights: bool = True,
356
- attn_mask: Optional[Tensor] = None,
357
- use_separate_proj_weight: bool = False,
358
- q_proj_weight: Optional[Tensor] = None,
359
- k_proj_weight: Optional[Tensor] = None,
360
- v_proj_weight: Optional[Tensor] = None,
361
- static_k: Optional[Tensor] = None,
362
- static_v: Optional[Tensor] = None,
363
- average_attn_weights: bool = True,
364
- is_causal: bool = False,
365
- ) -> Tuple[Tensor, Optional[Tensor]]:
366
- tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
367
-
368
- is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
369
-
370
- # For unbatched input, we unsqueeze at the expected batch dim to pretend that the input
371
- # is batched, run the computation, and squeeze that batch dimension again
372
- # before returning, so the output doesn't carry the temporary batch dimension.
373
- if not is_batched:
374
- # unsqueeze if the input is unbatched
375
- query = query.unsqueeze(1)
376
- key = key.unsqueeze(1)
377
- value = value.unsqueeze(1)
378
- if key_padding_mask is not None:
379
- key_padding_mask = key_padding_mask.unsqueeze(0)
380
-
381
- # set up shape vars
382
- tgt_len, bsz, embed_dim = query.shape
383
- src_len, _, _ = key.shape
384
-
385
- key_padding_mask = _canonical_mask(
386
- mask=key_padding_mask,
387
- mask_name="key_padding_mask",
388
- other_type=_none_or_dtype(attn_mask),
389
- other_name="attn_mask",
390
- target_type=query.dtype
391
- )
392
-
393
- if is_causal and attn_mask is None:
394
- raise RuntimeError(
395
- "Need attn_mask if specifying the is_causal hint. "
396
- "You may use the Transformer module method "
397
- "`generate_square_subsequent_mask` to create this mask."
398
- )
399
-
400
- if is_causal and key_padding_mask is None and not need_weights:
401
- # When we have a key_padding_mask or need weights, we need attn_mask.
402
- # Otherwise, we pass the is_causal hint straight through as the
403
- # is_causal indicator to SDPA.
404
- attn_mask = None
405
- else:
406
- attn_mask = _canonical_mask(
407
- mask=attn_mask,
408
- mask_name="attn_mask",
409
- other_type=None,
410
- other_name="",
411
- target_type=query.dtype,
412
- check_other=False,
413
- )
414
-
415
- if key_padding_mask is not None:
416
- # We have the attn_mask, and use that to merge kpm into it.
417
- # Turn off use of is_causal hint, as the merged mask is no
418
- # longer causal.
419
- is_causal = False
420
-
421
- assert embed_dim == embed_dim_to_check, \
422
- f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
423
- if isinstance(embed_dim, torch.Tensor):
424
- # embed_dim can be a tensor when JIT tracing
425
- head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
426
- else:
427
- head_dim = embed_dim // num_heads
428
- assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
429
- if use_separate_proj_weight:
430
- # allow MHA to have different embedding dimensions when separate projection weights are used
431
- assert key.shape[:2] == value.shape[:2], \
432
- f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
433
- else:
434
- assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
435
-
436
- #
437
- # compute in-projection
438
- #
439
- if not use_separate_proj_weight:
440
- assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
441
- q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
442
- else:
443
- assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
444
- assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
445
- assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
446
- if in_proj_bias is None:
447
- b_q = b_k = b_v = None
448
- else:
449
- b_q, b_k, b_v = in_proj_bias.chunk(3)
450
- q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
451
-
452
- # prep attention mask
453
-
454
- if attn_mask is not None:
455
- # ensure attn_mask's dim is 3
456
- if attn_mask.dim() == 2:
457
- correct_2d_size = (tgt_len, src_len)
458
- if attn_mask.shape != correct_2d_size:
459
- raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
460
- attn_mask = attn_mask.unsqueeze(0)
461
- elif attn_mask.dim() == 3:
462
- correct_3d_size = (bsz * num_heads, tgt_len, src_len)
463
- if attn_mask.shape != correct_3d_size:
464
- raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
465
- else:
466
- raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
467
-
468
- # add bias along batch dimension (currently second)
469
- if bias_k is not None and bias_v is not None:
470
- assert static_k is None, "bias cannot be added to static key."
471
- assert static_v is None, "bias cannot be added to static value."
472
- k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
473
- v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
474
- if attn_mask is not None:
475
- attn_mask = pad(attn_mask, (0, 1))
476
- if key_padding_mask is not None:
477
- key_padding_mask = pad(key_padding_mask, (0, 1))
478
- else:
479
- assert bias_k is None
480
- assert bias_v is None
481
-
482
- #
483
- # reshape q, k, v for multi-head attention and make them batch-first
484
- #
485
- q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
486
- if static_k is None:
487
- k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
488
- else:
489
- # TODO finish disentangling control flow so we don't do in-projections when statics are passed
490
- assert static_k.size(0) == bsz * num_heads, \
491
- f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
492
- assert static_k.size(2) == head_dim, \
493
- f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
494
- k = static_k
495
- if static_v is None:
496
- v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
497
- else:
498
- # TODO finish disentangling control flow so we don't do in-projections when statics are passed
499
- assert static_v.size(0) == bsz * num_heads, \
500
- f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
501
- assert static_v.size(2) == head_dim, \
502
- f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
503
- v = static_v
504
-
505
- # add zero attention along batch dimension (now first)
506
- if add_zero_attn:
507
- zero_attn_shape = (bsz * num_heads, 1, head_dim)
508
- k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
509
- v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
510
- if attn_mask is not None:
511
- attn_mask = pad(attn_mask, (0, 1))
512
- if key_padding_mask is not None:
513
- key_padding_mask = pad(key_padding_mask, (0, 1))
514
-
515
- # update source sequence length after adjustments
516
- src_len = k.size(1)
517
-
518
- # merge key padding and attention masks
519
- if key_padding_mask is not None:
520
- assert key_padding_mask.shape == (bsz, src_len), \
521
- f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
522
- key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
523
- expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
524
- if attn_mask is None:
525
- attn_mask = key_padding_mask
526
- else:
527
- attn_mask = attn_mask + key_padding_mask
528
-
529
- # adjust dropout probability
530
- if not training:
531
- dropout_p = 0.0
532
-
533
- #
534
- # (deep breath) calculate attention and out projection
535
- #
536
-
537
- if need_weights:
538
- B, Nt, E = q.shape
539
- q_scaled = q / math.sqrt(E)
540
-
541
- assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
542
-
543
- if attn_mask is not None:
544
- attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
545
- else:
546
- attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
547
- attn_output_weights = softmax(attn_output_weights, dim=-1)
548
- if dropout_p > 0.0:
549
- attn_output_weights = dropout(attn_output_weights, p=dropout_p)
550
-
551
- attn_output = torch.bmm(attn_output_weights, v)
552
-
553
- attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
554
- attn_output = self.out_proj(attn_output)
555
- attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
556
-
557
- # optionally average attention weights over heads
558
- attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
559
- if average_attn_weights:
560
- attn_output_weights = attn_output_weights.mean(dim=1)
561
-
562
- if not is_batched:
563
- # squeeze the output if input was unbatched
564
- attn_output = attn_output.squeeze(1)
565
- attn_output_weights = attn_output_weights.squeeze(0)
566
- return attn_output, attn_output_weights
567
- else:
568
- # attn_mask can be either (L,S) or (N*num_heads, L, S)
569
- # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
570
- # in order to match the input for SDPA of (N, num_heads, L, S)
571
- if attn_mask is not None:
572
- if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
573
- attn_mask = attn_mask.unsqueeze(0)
574
- else:
575
- attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
576
-
577
- q = q.view(bsz, num_heads, tgt_len, head_dim)
578
- k = k.view(bsz, num_heads, src_len, head_dim)
579
- v = v.view(bsz, num_heads, src_len, head_dim)
580
-
581
- attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
582
- attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
583
-
584
- attn_output = self.out_proj(attn_output)
585
- attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
586
- if not is_batched:
587
- # squeeze the output if input was unbatched
588
- attn_output = attn_output.squeeze(1)
589
- return attn_output, None
590
-
591
-
592
- def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
593
- key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
594
- # Verifies the expected shape for `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
595
- # and returns if the input is batched or not.
596
- # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
597
-
598
- # Shape check.
599
- if query.dim() == 3:
600
- # Batched Inputs
601
- is_batched = True
602
- assert key.dim() == 3 and value.dim() == 3, \
603
- ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
604
- f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
605
- if key_padding_mask is not None:
606
- assert key_padding_mask.dim() == 2, \
607
- ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
608
- f" but found {key_padding_mask.dim()}-D tensor instead")
609
- if attn_mask is not None:
610
- assert attn_mask.dim() in (2, 3), \
611
- ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
612
- f" but found {attn_mask.dim()}-D tensor instead")
613
- elif query.dim() == 2:
614
- # Unbatched Inputs
615
- is_batched = False
616
- assert key.dim() == 2 and value.dim() == 2, \
617
- ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
618
- f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
619
-
620
- if key_padding_mask is not None:
621
- assert key_padding_mask.dim() == 1, \
622
- ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
623
- f" but found {key_padding_mask.dim()}-D tensor instead")
624
-
625
- if attn_mask is not None:
626
- assert attn_mask.dim() in (2, 3), \
627
- ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
628
- f" but found {attn_mask.dim()}-D tensor instead")
629
- if attn_mask.dim() == 3:
630
- expected_shape = (num_heads, query.shape[0], key.shape[0])
631
- assert attn_mask.shape == expected_shape, \
632
- (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
633
- else:
634
- raise AssertionError(
635
- f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
636
-
637
- return is_batched
638
-
639
-
640
- def _canonical_mask(
641
- mask: Optional[Tensor],
642
- mask_name: str,
643
- other_type: Optional[DType],
644
- other_name: str,
645
- target_type: DType,
646
- check_other: bool = True,
647
- ) -> Optional[Tensor]:
648
-
649
- if mask is not None:
650
- _mask_dtype = mask.dtype
651
- _mask_is_float = torch.is_floating_point(mask)
652
- if _mask_dtype != torch.bool and not _mask_is_float:
653
- raise AssertionError(
654
- f"only bool and floating types of {mask_name} are supported")
655
- if check_other and other_type is not None:
656
- if _mask_dtype != other_type:
657
- warnings.warn(
658
- f"Support for mismatched {mask_name} and {other_name} "
659
- "is deprecated. Use same type for both instead."
660
- )
661
- if not _mask_is_float:
662
- mask = (
663
- torch.zeros_like(mask, dtype=target_type)
664
- .masked_fill_(mask, float("-inf"))
665
- )
666
- return mask
667
-
668
-
669
- def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
670
- if input is None:
671
- return None
672
- elif isinstance(input, torch.Tensor):
673
- return input.dtype
674
- raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
675
-
676
- def _in_projection_packed(
677
- q: Tensor,
678
- k: Tensor,
679
- v: Tensor,
680
- w: Tensor,
681
- b: Optional[Tensor] = None,
682
- ) -> List[Tensor]:
683
- r"""
684
- Performs the in-projection step of the attention operation, using packed weights.
685
- Output is a triple containing projection tensors for query, key and value.
686
- Args:
687
- q, k, v: query, key and value tensors to be projected. For self-attention,
688
- these are typically the same tensor; for encoder-decoder attention,
689
- k and v are typically the same tensor. (We take advantage of these
690
- identities for performance if they are present.) Regardless, q, k and v
691
- must share a common embedding dimension; otherwise their shapes may vary.
692
- w: projection weights for q, k and v, packed into a single tensor. Weights
693
- are packed along dimension 0, in q, k, v order.
694
- b: optional projection biases for q, k and v, packed into a single tensor
695
- in q, k, v order.
696
- Shape:
697
- Inputs:
698
- - q: :math:`(..., E)` where E is the embedding dimension
699
- - k: :math:`(..., E)` where E is the embedding dimension
700
- - v: :math:`(..., E)` where E is the embedding dimension
701
- - w: :math:`(E * 3, E)` where E is the embedding dimension
702
- - b: :math:`E * 3` where E is the embedding dimension
703
- Output:
704
- - in output list :math:`[q', k', v']`, each output tensor will have the
705
- same shape as the corresponding input tensor.
706
- """
707
- E = q.size(-1)
708
- if k is v:
709
- if q is k:
710
- # self-attention
711
- proj = linear(q, w, b)
712
- # reshaping to (3, E) rather than (E, 3) is deliberate: it gives better memory coalescing and keeps the same order as chunk()
713
- proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
714
- return proj[0], proj[1], proj[2]
715
- else:
716
- # encoder-decoder attention
717
- w_q, w_kv = w.split([E, E * 2])
718
- if b is None:
719
- b_q = b_kv = None
720
- else:
721
- b_q, b_kv = b.split([E, E * 2])
722
- q_proj = linear(q, w_q, b_q)
723
- kv_proj = linear(k, w_kv, b_kv)
724
- # reshaping to (2, E) rather than (E, 2) is deliberate: it gives better memory coalescing and keeps the same order as chunk()
725
- kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
726
- return (q_proj, kv_proj[0], kv_proj[1])
727
- else:
728
- w_q, w_k, w_v = w.chunk(3)
729
- if b is None:
730
- b_q = b_k = b_v = None
731
- else:
732
- b_q, b_k, b_v = b.chunk(3)
733
- return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
734
-
735
-
736
- def _in_projection(
737
- q: Tensor,
738
- k: Tensor,
739
- v: Tensor,
740
- w_q: Tensor,
741
- w_k: Tensor,
742
- w_v: Tensor,
743
- b_q: Optional[Tensor] = None,
744
- b_k: Optional[Tensor] = None,
745
- b_v: Optional[Tensor] = None,
746
- ) -> Tuple[Tensor, Tensor, Tensor]:
747
- r"""
748
- Performs the in-projection step of the attention operation. This is simply
749
- a triple of linear projections, with shape constraints on the weights which
750
- ensure embedding dimension uniformity in the projected outputs.
751
- Output is a triple containing projection tensors for query, key and value.
752
- Args:
753
- q, k, v: query, key and value tensors to be projected.
754
- w_q, w_k, w_v: weights for q, k and v, respectively.
755
- b_q, b_k, b_v: optional biases for q, k and v, respectively.
756
- Shape:
757
- Inputs:
758
- - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
759
- number of leading dimensions.
760
- - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
761
- number of leading dimensions.
762
- - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
763
- number of leading dimensions.
764
- - w_q: :math:`(Eq, Eq)`
765
- - w_k: :math:`(Eq, Ek)`
766
- - w_v: :math:`(Eq, Ev)`
767
- - b_q: :math:`(Eq)`
768
- - b_k: :math:`(Eq)`
769
- - b_v: :math:`(Eq)`
770
- Output: in output triple :math:`(q', k', v')`,
771
- - q': :math:`[Qdims..., Eq]`
772
- - k': :math:`[Kdims..., Eq]`
773
- - v': :math:`[Vdims..., Eq]`
774
- """
775
- Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
776
- assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
777
- assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
778
- assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
779
- assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
780
- assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
781
- assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
782
- return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
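
For reference, a minimal usage sketch of the deleted Resampler class (not part of this repository or of the commit; the query count, dimensions, head count, and patch-grid sizes below are illustrative assumptions, and only the file's own dependencies — torch, numpy, transformers — are assumed to be installed):

import torch
# from resampler import Resampler  # the class defined in the deleted file above

# hypothetical sizes: 64 queries, 1024-dim output, 8 heads, 1152-dim vision features
resampler = Resampler(num_queries=64, embed_dim=1024, num_heads=8, kv_dim=1152)
resampler.eval()

# two images whose vision encoder produced 8x12 and 6x10 patch grids
tgt_sizes = torch.tensor([[8, 12], [6, 10]])              # (batch, 2) as (h, w) per image
max_len = int((tgt_sizes[:, 0] * tgt_sizes[:, 1]).max())  # padded key/value sequence length
x = torch.randn(2, max_len, 1152)                         # (batch, max_len, kv_dim) vision features

with torch.no_grad():
    out = resampler(x, tgt_sizes)                         # (batch, num_queries, embed_dim)
print(out.shape)                                          # torch.Size([2, 64, 1024])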