Lanni-ni committed
Commit 15063d0 · verified · 1 Parent(s): 8a00f5e

add remote code + model files

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .ipynb_checkpoints/configuration_forgetting_transformer-checkpoint.py +84 -0
  2. .ipynb_checkpoints/fgate_cache-checkpoint.py +143 -0
  3. .ipynb_checkpoints/fgate_cache.py-checkpoint.backup +203 -0
  4. .ipynb_checkpoints/modeling_forgetting_transformer-checkpoint.py +910 -0
  5. __init__.py +1 -0
  6. __pycache__/__init__.cpython-310.pyc +0 -0
  7. __pycache__/configuration_forgetting_transformer.cpython-310.pyc +0 -0
  8. __pycache__/fgate_cache.cpython-310.pyc +0 -0
  9. __pycache__/glu_linear.cpython-310.pyc +0 -0
  10. __pycache__/modeling_forgetting_transformer.cpython-310.pyc +0 -0
  11. __pycache__/token_shift.cpython-310.pyc +0 -0
  12. configuration_forgetting_transformer.py +84 -0
  13. fgate_cache.py +143 -0
  14. fgate_cache.py.backup +203 -0
  15. glu_linear.py +61 -0
  16. modeling_forgetting_transformer.py +910 -0
  17. ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py +1138 -0
  18. ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py +72 -0
  19. ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py +179 -0
  20. ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py +88 -0
  21. ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py +117 -0
  22. ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py +171 -0
  23. ops/__init__.py +3 -0
  24. ops/__pycache__/__init__.cpython-310.pyc +0 -0
  25. ops/__pycache__/direction_sensitive_geometric.cpython-310.pyc +0 -0
  26. ops/__pycache__/forgetting_attention.cpython-310.pyc +0 -0
  27. ops/__pycache__/forgetting_attention_std.cpython-310.pyc +0 -0
  28. ops/__pycache__/framework_mock.cpython-310.pyc +0 -0
  29. ops/__pycache__/geometric_attention_final.cpython-310.pyc +0 -0
  30. ops/__pycache__/geometric_attention_std.cpython-310.pyc +0 -0
  31. ops/__pycache__/layer_with_visualization.cpython-310.pyc +0 -0
  32. ops/__pycache__/multi_head_attention.cpython-310.pyc +0 -0
  33. ops/__pycache__/multi_head_relative_pos_attention.cpython-310.pyc +0 -0
  34. ops/__pycache__/sliding_window_attention_std.cpython-310.pyc +0 -0
  35. ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc +0 -0
  36. ops/__pycache__/vanilla_attention_std.cpython-310.pyc +0 -0
  37. ops/direction_sensitive_geometric.py +115 -0
  38. ops/direction_sensitive_geometric.py.bak +115 -0
  39. ops/forgetting_attention.py +1138 -0
  40. ops/forgetting_attention_std.py +72 -0
  41. ops/framework_mock.py +25 -0
  42. ops/geometric_attention/__init__.py +1 -0
  43. ops/geometric_attention/__pycache__/__init__.cpython-310.pyc +0 -0
  44. ops/geometric_attention/__pycache__/cuda_interface.cpython-310.pyc +0 -0
  45. ops/geometric_attention/cuda_interface.cu +177 -0
  46. ops/geometric_attention/cuda_interface.py +93 -0
  47. ops/geometric_attention/cuda_interface.py.bak +94 -0
  48. ops/geometric_attention_final.py +109 -0
  49. ops/geometric_attention_std.py +179 -0
  50. ops/layer_with_visualization.py +43 -0
.ipynb_checkpoints/configuration_forgetting_transformer-checkpoint.py ADDED
@@ -0,0 +1,84 @@
1
+ # -*- coding: utf-8 -*-
2
+ from typing import Optional
3
+ from transformers.configuration_utils import PretrainedConfig
4
+
5
+ class ForgettingTransformerConfig(PretrainedConfig):
6
+ model_type = 'forgetting_transformer'
7
+ keys_to_ignore_at_inference = ['past_key_values']
8
+
9
+ def __init__(
10
+ self,
11
+ vocab_size: int = 32000,
12
+ hidden_size: int = 2048,
13
+ hidden_ratio: Optional[float] = 4,
14
+ intermediate_size: Optional[int] = None,
15
+ num_hidden_layers: int = 24,
16
+ num_heads: int = 32,
17
+ num_kv_heads: int = None,
18
+ hidden_act: str = "swish",
19
+ window_size: Optional[int] = None,
20
+ max_position_embeddings: int = 2048,
21
+ initializer_range: float = 0.02,
22
+ elementwise_affine: Optional[bool] = True,
23
+ norm_eps: float = 1e-6,
24
+ use_cache: bool = True,
25
+ pad_token_id: int = None,
26
+ bos_token_id: int = 1,
27
+ eos_token_id: int = 2,
28
+ tie_word_embeddings: bool = False,
29
+ attention_bias: bool = False,
30
+ fuse_norm: bool = True,
31
+ fuse_cross_entropy: bool = True,
32
+ rope_base: float = 500000.0,
33
+ use_rope: bool = False,
34
+ use_output_gate: bool = False,
35
+ ogate_act: str = "sigmoid",
36
+ fgate_type: str = "full",
37
+ fgate_bias_init: bool = False,
38
+ decay_time_min: Optional[float] = None,
39
+ decay_time_max: Optional[float] = None,
40
+ use_output_norm: bool = False,
41
+ qk_norm: bool = False,
42
+ qk_norm_share_param_across_head: bool = False,
43
+ use_k_shift: bool = False,
44
+ use_v_shift: bool = False,
45
+ **kwargs,
46
+ ):
47
+ self.vocab_size = vocab_size
48
+ self.hidden_size = hidden_size
49
+ self.hidden_ratio = hidden_ratio
50
+ self.intermediate_size = intermediate_size
51
+ self.num_hidden_layers = num_hidden_layers
52
+ self.num_heads = num_heads
53
+ self.num_kv_heads = num_kv_heads
54
+ self.window_size = window_size
55
+ self.max_position_embeddings = max_position_embeddings
56
+ self.hidden_act = hidden_act
57
+ self.initializer_range = initializer_range
58
+ self.elementwise_affine = elementwise_affine
59
+ self.norm_eps = norm_eps
60
+ self.use_cache = use_cache
61
+ self.attention_bias = attention_bias
62
+ self.fuse_cross_entropy = fuse_cross_entropy
63
+ self.fuse_norm = fuse_norm
64
+ self.rope_base = rope_base
65
+ self.use_rope = use_rope
66
+ self.use_output_gate = use_output_gate
67
+ self.ogate_act = ogate_act
68
+ self.fgate_type = fgate_type
69
+ self.fgate_bias_init = fgate_bias_init
70
+ self.decay_time_min = decay_time_min
71
+ self.decay_time_max = decay_time_max
72
+ self.use_output_norm = use_output_norm
73
+ self.qk_norm = qk_norm
74
+ self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
75
+ self.use_k_shift = use_k_shift
76
+ self.use_v_shift = use_v_shift
77
+
78
+ super().__init__(
79
+ pad_token_id=pad_token_id,
80
+ bos_token_id=bos_token_id,
81
+ eos_token_id=eos_token_id,
82
+ tie_word_embeddings=tie_word_embeddings,
83
+ **kwargs,
84
+ )
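For quick reference, the configuration class added above can be constructed on its own. The sketch below assumes configuration_forgetting_transformer.py is importable from the working directory (the same fallback import the modeling file in this commit uses); the non-default values are purely illustrative.

    from configuration_forgetting_transformer import ForgettingTransformerConfig

    # Every keyword below appears in the __init__ signature of the class above.
    config = ForgettingTransformerConfig(
        vocab_size=32000,
        hidden_size=1024,
        num_hidden_layers=12,
        num_heads=16,
        fgate_type="full",        # data-dependent forget gate
        use_output_gate=True,
        use_output_norm=True,
    )
    print(config.model_type)      # 'forgetting_transformer'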
.ipynb_checkpoints/fgate_cache-checkpoint.py ADDED
@@ -0,0 +1,143 @@
1
+ from typing import List, Tuple, Optional, Any, Dict
2
+ import torch
3
+
4
+ class FgateDynamicCache:
5
+ """
6
+ A cache that grows dynamically as more tokens are generated.
7
+ Custom cache for Forgetting Transformer that does not inherit from transformers.Cache.
8
+ """
9
+
10
+ def __init__(self, num_hidden_layers: Optional[int] = None) -> None:
11
+ self.key_cache: List[torch.Tensor] = []
12
+ self.value_cache: List[torch.Tensor] = []
13
+ self.log_fgate_cache: List[torch.Tensor] = []
14
+ self.key_shift_cache: List[torch.Tensor] = []
15
+ self.value_shift_cache: List[torch.Tensor] = []
16
+ self._seen_tokens = 0
17
+
18
+ def update_shift_cache(
19
+ self,
20
+ key_shift_state: torch.Tensor,
21
+ value_shift_state: torch.Tensor,
22
+ layer_idx,
23
+ ):
24
+ assert layer_idx == len(self.key_shift_cache) == len(self.value_shift_cache)
25
+ self.key_shift_cache.append(key_shift_state)
26
+ self.value_shift_cache.append(value_shift_state)
27
+
28
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
29
+ if layer_idx < len(self):
30
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
31
+ else:
32
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
33
+
34
+ def __iter__(self):
35
+ for layer_idx in range(len(self)):
36
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
37
+
38
+ def __len__(self):
39
+ return len(self.key_cache)
40
+
41
+ def update(
42
+ self,
43
+ key_states: torch.Tensor,
44
+ value_states: torch.Tensor,
45
+ log_fgate_states: torch.Tensor,
46
+ layer_idx: int,
47
+ cache_kwargs: Optional[Dict[str, Any]] = None,
48
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
49
+ assert log_fgate_states.ndim == 3, f"log_fgate must be (B, H, T), but got {log_fgate_states.size()}"
50
+ if layer_idx == 0:
51
+ self._seen_tokens += key_states.shape[-2]
52
+
53
+ if len(self.key_cache) <= layer_idx:
54
+ self.key_cache.append(key_states)
55
+ self.value_cache.append(value_states)
56
+ self.log_fgate_cache.append(log_fgate_states)
57
+ else:
58
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
59
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
60
+ self.log_fgate_cache[layer_idx] = torch.cat([self.log_fgate_cache[layer_idx], log_fgate_states], dim=-1)
61
+
62
+ return self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]
63
+
64
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
65
+ if len(self.key_cache) <= layer_idx:
66
+ return 0
67
+ return self.key_cache[layer_idx].shape[-2]
68
+
69
+ def get_max_length(self) -> Optional[int]:
70
+ return None
71
+
72
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], ...]:
73
+ legacy_cache = ()
74
+ for layer_idx in range(len(self)):
75
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]),)
76
+ return legacy_cache
77
+
78
+ @classmethod
79
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_layers: Optional[int] = None) -> "FgateDynamicCache":
80
+ """
81
+ Converts a cache in the legacy cache format into an equivalent FgateDynamicCache.
82
+
83
+ Args:
84
+ past_key_values: Optional legacy cache format
85
+ num_layers: Not used in this implementation
86
+
87
+ Returns:
88
+ FgateDynamicCache instance
89
+ """
90
+ cache = cls()
91
+
92
+ if past_key_values is not None:
93
+ for layer_idx in range(len(past_key_values)):
94
+ key_states, value_states, log_fgate_states = past_key_values[layer_idx]
95
+ cache.update(key_states, value_states, log_fgate_states, layer_idx)
96
+
97
+ return cache
98
+
99
+ def crop(self, max_length: int):
100
+ if max_length < 0:
101
+ max_length = self.get_seq_length() - abs(max_length)
102
+
103
+ if self.get_seq_length() <= max_length:
104
+ return
105
+
106
+ self._seen_tokens = max_length
107
+ for idx in range(len(self.key_cache)):
108
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
109
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
110
+ self.log_fgate_cache[idx] = self.log_fgate_cache[idx][..., :max_length]
111
+
112
+ def batch_split(self, full_batch_size: int, split_size: int) -> List["FgateDynamicCache"]:
113
+ out = []
114
+ for i in range(0, full_batch_size, split_size):
115
+ current_split = FgateDynamicCache()
116
+ current_split._seen_tokens = self._seen_tokens
117
+ current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache]
118
+ current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache]
119
+ current_split.log_fgate_cache = [tensor[i : i + split_size] for tensor in self.log_fgate_cache]
120
+ out.append(current_split)
121
+ return out
122
+
123
+ @classmethod
124
+ def from_batch_splits(cls, splits: List["FgateDynamicCache"]) -> "FgateDynamicCache":
125
+ cache = cls()
126
+ for idx in range(len(splits[0])):
127
+ layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
128
+ layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
129
+ layer_log_fgates = torch.cat([current.log_fgate_cache[idx] for current in splits], dim=0)
130
+ cache.update(layer_keys, layer_values, layer_log_fgates, idx)
131
+ return cache
132
+
133
+ def batch_repeat_interleave(self, repeats: int):
134
+ for layer_idx in range(len(self)):
135
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
136
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
137
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx].repeat_interleave(repeats, dim=0)
138
+
139
+ def batch_select_indices(self, indices: torch.Tensor):
140
+ for layer_idx in range(len(self)):
141
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
142
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
143
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx][indices, ...]
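A minimal sketch of how this cache is exercised. Shapes are inferred from update(), which concatenates keys and values along dim=-2 and log forget gates along dim=-1, i.e. (B, H, T, D) and (B, H, T); the random tensors and the top-level import are assumptions for illustration only.

    import torch
    from fgate_cache import FgateDynamicCache  # assumes fgate_cache.py is on the import path

    B, H, T, D = 2, 4, 8, 16
    cache = FgateDynamicCache()

    # Prefill layer 0 with a full prompt.
    keys = torch.randn(B, H, T, D)
    values = torch.randn(B, H, T, D)
    log_fgate = torch.zeros(B, H, T)      # log forget gates, one scalar per head and position
    cache.update(keys, values, log_fgate, layer_idx=0)

    # One decoding step appends a single token along the time dimension.
    cache.update(torch.randn(B, H, 1, D), torch.randn(B, H, 1, D), torch.zeros(B, H, 1), layer_idx=0)
    print(cache.get_seq_length())         # 9

    # The legacy tuple format round-trips through the helpers defined above.
    legacy = cache.to_legacy_cache()
    restored = FgateDynamicCache.from_legacy_cache(legacy)
    print(len(restored), restored.get_seq_length())   # 1 9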
.ipynb_checkpoints/fgate_cache.py-checkpoint.backup ADDED
@@ -0,0 +1,203 @@
1
+ from typing import List, Tuple, Optional, Any, Dict
2
+ import torch
3
+ from transformers.cache_utils import Cache
4
+
5
+ class FgateDynamicCache(Cache):
6
+ """
7
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
8
+
9
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
10
+ `[batch_size, num_heads, seq_len, head_dim]`.
11
+
12
+ Example:
13
+
14
+ ```python
15
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
16
+
17
+ >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
18
+ >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
19
+
20
+ >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")
21
+
22
+ >>> # Prepare a cache class and pass it to model's forward
23
+ >>> past_key_values = DynamicCache()
24
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
25
+ >>> outputs.past_key_values # access cache filled with key/values from generation
26
+ DynamicCache()
27
+ ```
28
+ """
29
+
30
+ def __init__(self) -> None:
31
+ super().__init__()
32
+ self.key_cache: List[torch.Tensor] = []
33
+ self.value_cache: List[torch.Tensor] = []
34
+ self.log_fgate_cache: List[torch.Tensor] = []
35
+
36
+ self.key_shift_cache: List[torch.Tensor] = []
37
+ self.value_shift_cache: List[torch.Tensor] = []
38
+
39
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
40
+
41
+ def update_shift_cache(
42
+ self,
43
+ key_shift_state: torch.Tensor,
44
+ value_shift_state: torch.Tensor,
45
+ layer_idx,
46
+ ):
47
+ assert layer_idx == len(self.key_shift_cache) == len(self.value_shift_cache)
48
+ self.key_shift_cache.append(key_shift_state)
49
+ self.value_shift_cache.append(value_shift_state)
50
+
51
+
52
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
53
+ """
54
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
55
+ sequence length.
56
+ """
57
+ if layer_idx < len(self):
58
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
59
+ else:
60
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
61
+
62
+ def __iter__(self):
63
+ """
64
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
65
+ keys and values
66
+ """
67
+ for layer_idx in range(len(self)):
68
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
69
+
70
+ def __len__(self):
71
+ """
72
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
73
+ to the number of layers in the model.
74
+ """
75
+ return len(self.key_cache)
76
+
77
+ def update(
78
+ self,
79
+ key_states: torch.Tensor,
80
+ value_states: torch.Tensor,
81
+ log_fgate_states: torch.Tensor,
82
+ layer_idx: int,
83
+ cache_kwargs: Optional[Dict[str, Any]] = None,
84
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
85
+ """
86
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
87
+
88
+ Parameters:
89
+ key_states (`torch.Tensor`):
90
+ The new key states to cache.
91
+ value_states (`torch.Tensor`):
92
+ The new value states to cache.
93
+ layer_idx (`int`):
94
+ The index of the layer to cache the states for.
95
+ cache_kwargs (`Dict[str, Any]`, `optional`):
96
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
97
+
98
+ Return:
99
+ A tuple containing the updated key and value states.
100
+ """
101
+ assert log_fgate_states.ndim == 3, f"log_fgate must be (B, H, T), but got {log_fgate_states.size()}"
102
+ # Update the number of seen tokens
103
+ if layer_idx == 0:
104
+ self._seen_tokens += key_states.shape[-2]
105
+
106
+ # Update the cache
107
+ if len(self.key_cache) <= layer_idx:
108
+ self.key_cache.append(key_states)
109
+ self.value_cache.append(value_states)
110
+ self.log_fgate_cache.append(log_fgate_states)
111
+ else:
112
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
113
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
114
+ self.log_fgate_cache[layer_idx] = torch.cat([self.log_fgate_cache[layer_idx], log_fgate_states], dim=-1)
115
+
116
+ return self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]
117
+
118
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
119
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
120
+ # TODO: deprecate this function in favor of `cache_position`
121
+ if len(self.key_cache) <= layer_idx:
122
+ return 0
123
+ return self.key_cache[layer_idx].shape[-2]
124
+
125
+ def get_max_length(self) -> Optional[int]:
126
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
127
+ return None
128
+
129
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
130
+ """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for
131
+ backward compatibility."""
132
+ legacy_cache = ()
133
+ for layer_idx in range(len(self)):
134
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]),)
135
+ return legacy_cache
136
+
137
+ @classmethod
138
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_layers: Optional[int] = None) -> "DynamicCache":
139
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for
140
+ backward compatibility."""
141
+ raise NotImplementedError
142
+ assert num_layers is not None
143
+ cache = cls(num_layers)
144
+ if past_key_values is not None:
145
+ for layer_idx in range(len(past_key_values)):
146
+ key_states, value_states, log_fgate_states = past_key_values[layer_idx]
147
+ cache.update(key_states, value_states, log_fgate_states, layer_idx)
148
+ return cache
149
+
150
+ def crop(self, max_length: int):
151
+ """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
152
+ negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
153
+ # In case it is negative
154
+ if max_length < 0:
155
+ max_length = self.get_seq_length() - abs(max_length)
156
+
157
+ if self.get_seq_length() <= max_length:
158
+ return
159
+
160
+ self._seen_tokens = max_length
161
+ for idx in range(len(self.key_cache)):
162
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
163
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
164
+ self.log_fgate_cache[idx] = self.log_fgate_cache[idx][..., :max_length]
165
+
166
+ def batch_split(self, full_batch_size: int, split_size: int) -> List["DynamicCache"]:
167
+ """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
168
+ `_split_model_inputs()` in `generation.utils`"""
169
+ out = []
170
+ for i in range(0, full_batch_size, split_size):
171
+ current_split = DynamicCache()
172
+ current_split._seen_tokens = self._seen_tokens
173
+ current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache]
174
+ current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache]
175
+ current_split.log_fgate_cache = [tensor[i : i + split_size] for tensor in self.log_fgate_cache]
176
+ out.append(current_split)
177
+ return out
178
+
179
+ @classmethod
180
+ def from_batch_splits(cls, splits: List["DynamicCache"]) -> "DynamicCache":
181
+ """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
182
+ `generation.utils`"""
183
+ cache = cls()
184
+ for idx in range(len(splits[0])):
185
+ layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
186
+ layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
187
+ layer_log_fgates = torch.cat([current.log_fgate_cache[idx] for current in splits], dim=0)
188
+ cache.update(layer_keys, layer_values, layer_log_fgates, idx)
189
+ return cache
190
+
191
+ def batch_repeat_interleave(self, repeats: int):
192
+ """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
193
+ for layer_idx in range(len(self)):
194
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
195
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
196
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx].repeat_interleave(repeats, dim=0)
197
+
198
+ def batch_select_indices(self, indices: torch.Tensor):
199
+ """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
200
+ for layer_idx in range(len(self)):
201
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
202
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
203
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx][indices, ...]
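This .backup variant subclasses transformers.Cache and keeps the upstream docstrings, but its from_legacy_cache raises NotImplementedError and batch_split still constructs DynamicCache, which is not imported here; the 143-line fgate_cache.py in this commit (evidently the same as the checkpoint shown first) is what modeling_forgetting_transformer.py imports. As a sketch of the batch utilities on that working class (shapes and values are illustrative):

    import torch
    from fgate_cache import FgateDynamicCache  # the non-backup class from this commit

    B, H, T, D = 4, 2, 3, 8
    cache = FgateDynamicCache()
    cache.update(torch.randn(B, H, T, D), torch.randn(B, H, T, D), torch.zeros(B, H, T), layer_idx=0)

    # Split the batch and stitch it back together, as the generation utilities do.
    halves = cache.batch_split(full_batch_size=B, split_size=2)
    merged = FgateDynamicCache.from_batch_splits(halves)
    print(merged.key_cache[0].shape)       # torch.Size([4, 2, 3, 8])

    # Keep only selected batch entries, as contrastive search does.
    merged.batch_select_indices(torch.tensor([0, 2]))
    print(merged.key_cache[0].shape)       # torch.Size([2, 2, 3, 8])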
.ipynb_checkpoints/modeling_forgetting_transformer-checkpoint.py ADDED
@@ -0,0 +1,910 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from __future__ import annotations
4
+
5
+ import math
6
+ import warnings
7
+ from typing import List, Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.utils.checkpoint
12
+ from transformers.activations import ACT2FN
13
+ from transformers.cache_utils import Cache
14
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
15
+ CausalLMOutputWithPast)
16
+ from transformers.modeling_utils import PreTrainedModel
17
+ from transformers.utils import logging
18
+
19
+ # from fla.layers.attn import Attention
20
+ from fla.modules import FusedCrossEntropyLoss, RMSNorm
21
+ from fla.modules.layernorm import group_norm_fn
22
+ from fla.modules.activations import swiglu_linear
23
+
24
+ from fla.modules import RotaryEmbedding
25
+ from einops import rearrange
26
+
27
+ # Dynamically import the configuration class so it works both locally and when loaded from the HuggingFace Hub
28
+ try:
29
+ from .configuration_forgetting_transformer import ForgettingTransformerConfig
30
+ except (ImportError, ValueError):
31
+ try:
32
+ from configuration_forgetting_transformer import ForgettingTransformerConfig
33
+ except ImportError:
34
+ from forgetting_transformer.model.forgetting_transformer.configuration_forgetting_transformer import ForgettingTransformerConfig
35
+
36
+ from forgetting_transformer.ops.forgetting_attention_std import forgetting_attention_std as forgetting_attention
37
+ from .fgate_cache import FgateDynamicCache
38
+ from .glu_linear import glu_linear
39
+ from .token_shift import token_shift
40
+
41
+ from functools import partial
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
+ class ShiftLinear(nn.Module):
47
+
48
+ def __init__(
49
+ self,
50
+ input_dim: int,
51
+ output_dim: int,
52
+ num_heads: int,
53
+ bias: bool,
54
+ shift_bias: bool = False
55
+ ):
56
+ super().__init__()
57
+
58
+ self.input_dim = input_dim
59
+ self.output_dim = output_dim
60
+ self.num_heads = num_heads
61
+ assert self.output_dim % self.num_heads == 0
62
+
63
+ self.linear = nn.Linear(input_dim, output_dim, bias=bias)
64
+ self.shift_proj = nn.Linear(input_dim, num_heads, bias=shift_bias)
65
+
66
+ def __repr__(self) -> str:
67
+ s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim})"
68
+ return s
69
+
70
+ def forward(self, x: torch.Tensor, shift_state: Optional[torch.Tensor]) -> torch.Tensor:
71
+ assert x.ndim == 3, "Input must be (B, T, D)"
72
+ B, T, D = x.size()
73
+ out = self.linear(x)
74
+ # (B, T, H, 1)
75
+ alpha = torch.sigmoid(self.shift_proj(x).float()).float()
76
+ # left, right, top, bottom (B, T=H, D=W)
77
+ # out_prev = nn.functional.pad(out, (0, 0, 1, -1))
78
+ # out_prev = torch.roll(out, shifts=1, dims=1)
79
+
80
+ out_per_head = rearrange(out, 'b t (h d) -> b t h d', h=self.num_heads)
81
+ if T > 1:
82
+ # TODO: note in this case cache is not used
83
+ result_per_head = token_shift(out_per_head, alpha, 1.0 - alpha)
84
+ else:
85
+ shift_state_per_head = rearrange(shift_state, 'b (h d) -> b 1 h d', h=self.num_heads)
86
+ result_per_head = (alpha[..., None] * shift_state_per_head + (1 - alpha[..., None]) * out_per_head)
87
+
88
+ result_per_head = result_per_head.to(out.dtype)
89
+
90
+ if shift_state is not None:
91
+ shift_state.copy_(out[:, -1, :])
92
+
93
+ result = rearrange(result_per_head, 'b t h d -> b t (h d)', h=self.num_heads)
94
+ return result
95
+
96
+ class GroupRMSNorm(nn.Module):
97
+ def __init__(
98
+ self,
99
+ num_groups: int,
100
+ hidden_size: int,
101
+ elementwise_affine: bool = True,
102
+ bias: bool = False,
103
+ eps: float = 1e-5
104
+ ) -> GroupRMSNorm:
105
+ super().__init__()
106
+
107
+ if hidden_size % num_groups != 0:
108
+ raise ValueError('num_channels must be divisible by num_groups')
109
+
110
+ self.num_groups = num_groups
111
+ self.hidden_size = hidden_size
112
+ self.elementwise_affine = elementwise_affine
113
+ self.eps = eps
114
+
115
+ self.register_parameter("weight", None)
116
+ self.register_parameter("bias", None)
117
+ if elementwise_affine:
118
+ self.weight = nn.Parameter(torch.ones(hidden_size))
119
+ if bias:
120
+ self.bias = nn.Parameter(torch.zeros(hidden_size))
121
+
122
+ def __repr__(self) -> str:
123
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
124
+ if not self.elementwise_affine:
125
+ s += f", elementwise_affine={self.elementwise_affine}"
126
+ s += f", eps={self.eps}"
127
+ s += ")"
128
+ return s
129
+
130
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
131
+ return group_norm_fn(
132
+ x,
133
+ self.weight,
134
+ self.bias,
135
+ residual=residual,
136
+ eps=self.eps,
137
+ prenorm=prenorm,
138
+ residual_in_fp32=residual_in_fp32,
139
+ is_rms_norm=True,
140
+ num_groups=self.num_groups
141
+ )
142
+
143
+ class ForgettingAttentionLayer(nn.Module):
144
+
145
+ def __init__(
146
+ self,
147
+ hidden_size: int = 2048,
148
+ num_heads: int = 32,
149
+ num_kv_heads: Optional[int] = None,
150
+ window_size: Optional[int] = None,
151
+ max_position_embeddings: Optional[int] = None,
152
+ use_rope: bool = False,
153
+ rope_base: float = 500000.0,
154
+ use_output_gate: bool = False,
155
+ ogate_act: str = "sigmoid",
156
+ fgate_type: str = "full",
157
+ fgate_bias_init: bool = False,
158
+ decay_time_min: Optional[float] = None,
159
+ decay_time_max: Optional[float] = None,
160
+ use_output_norm: bool = False,
161
+ norm_eps: float = 1e-6,
162
+ qk_norm: bool = False,
163
+ qk_norm_share_param_across_head: bool = False,
164
+ use_k_shift: bool = False,
165
+ use_v_shift: bool = False,
166
+ initializer_range: float = 0.02,
167
+ layer_idx: int = None
168
+ ):
169
+ """
170
+ Forgetting Attention layer.
171
+
172
+ Arguments:
173
+ - hidden_size: Input dimension and qkv dimension
174
+ - num_heads: Number of heads
175
+ - num_kv_heads: Not used. Should be None
176
+ - window_size: Not used. Should be None
177
+ - max_position_embeddings: Not used. Should be None
178
+ - use_rope: Whether to use RoPE. Default is False
179
+ - rope_base: the theta hyperparameter in RoPE. This has no effect if
180
+ use_rope=False
181
+ - use_output_gate: Whether to use output gates. Note that using output gates
182
+ introduces extra parameters and you may want to reduce parameters from
183
+ other components (e.g., MLPs)
184
+ - ogate_act: Activation for the output gate. Either "sigmoid" or "silu"
185
+ - fgate_type: Forget gate type. The following are supported:
186
+ - "full": The default data-dependent forget gate
187
+ - "bias_only": The data-independent forget gate
188
+ - "fixed": Forget gates with fixed values
189
+ - "none": Not using forget gates. Equivalent to forget gates with all
190
+ ones.
191
+ - fgate_bias_init: Whether to use special initialization for the bias terms in
192
+ the forget gate. This should only be used with fgate types in
193
+ ["bias_only", "fixed"].
194
+ - decay_time_min: T_min for the forget gate bias initialization. See paper
195
+ for details.
196
+ - decay_time_max: T_max for the forget gate bias initialization. See paper
197
+ for details.
198
+ - use_output_norm: Whether to use output normalization.
199
+ - norm_eps: Epsilon for the RMSNorms
200
+ - qk_norm: Whether to use qk_norm
201
+ - qk_norm_share_param_across_head: In QK-norm, whether to share the RMSNorm
202
+ scaling parameters across heads. This is just for backward compatibility.
203
+ - use_k_shift: Whether to use data-dependent key shift
204
+ - use_v_shift: Whether to use data-dependent value shift
205
+ - initializer_range: standard deviation for initialization
206
+ - layer_idx: The block index of this layer. Needed for KV-cache
207
+ """
208
+ super().__init__()
209
+
210
+ self.num_heads = num_heads
211
+ if num_kv_heads is None:
212
+ self.num_kv_heads = self.num_heads
213
+ else:
214
+ raise NotImplementedError("GQA has not been tested.")
215
+ self.num_kv_heads = num_kv_heads
216
+ self.num_kv_groups = num_heads // self.num_kv_heads
217
+ self.hidden_size = hidden_size
218
+ self.head_dim = self.hidden_size // self.num_heads
219
+ self.kv_dim = self.num_kv_heads * self.head_dim
221
+ self.window_size = window_size
222
+ self.max_position_embeddings = max_position_embeddings
223
+ self.layer_idx = layer_idx
224
+
225
+ self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
226
+ if use_k_shift:
227
+ self.k_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
228
+ else:
229
+ self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
230
+
231
+ if use_v_shift:
232
+ self.v_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
233
+ else:
234
+ self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
235
+
236
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
237
+ self.use_k_shift = use_k_shift
238
+ self.use_v_shift = use_v_shift
239
+
240
+
241
+ device = next(self.parameters()).device
242
+ # Forget gate
243
+ assert fgate_type in ["full", "bias_only", "fixed", "none"]
244
+ self.fgate_type = fgate_type
245
+ self.fgate_bias_init = fgate_bias_init
246
+ if fgate_type == "full":
247
+ assert not fgate_bias_init
248
+ self.fgate_proj = nn.Linear(self.hidden_size, self.num_heads, bias=True)
249
+ elif fgate_type == "bias_only":
250
+ self.fgate_bias = nn.Parameter(torch.zeros(size=(self.num_heads,), device=device))
251
+ self.fgate_bias._no_weight_decay = True
252
+ elif fgate_type == "fixed":
253
+ assert fgate_bias_init, "You must set fgate_bias_init = True with fixed fgate"
254
+ fgate_bias = torch.zeros(size=(self.num_heads,), device=device)
255
+ self.register_buffer("fgate_bias", fgate_bias)
256
+ elif fgate_type == "none":
257
+ pass
258
+ else:
259
+ raise ValueError(f"Unknown fgate type {fgate_type}")
260
+
261
+
262
+
263
+ # Forget gate initialization for data-independent and fixed forget gates
264
+ if fgate_bias_init:
265
+ assert decay_time_min is not None and decay_time_max is not None
266
+ assert decay_time_min > 0 and decay_time_max > 0
267
+ with torch.no_grad():
268
+ log_decay_time = torch.linspace(math.log(decay_time_min), math.log(decay_time_max), steps=self.num_heads)
269
+ decay_time = torch.exp(log_decay_time)
270
+ # Such that t = -1 / log(sigmoid(b))
271
+ bias_init = -torch.log(torch.expm1(1 / decay_time))
272
+ self.fgate_bias.copy_(bias_init)
273
+ else:
274
+ assert decay_time_min is None and decay_time_max is None
275
+
276
+ if use_output_gate:
277
+ self.ogate_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
278
+ self.ogate_act = ogate_act
279
+ assert ogate_act in ["silu", "sigmoid"]
280
+ else:
281
+ self.ogate_proj = None
282
+
283
+ if use_output_norm:
284
+ self.output_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size, eps=norm_eps)
285
+ else:
286
+ self.output_norm = None
287
+
288
+
289
+ if use_rope:
290
+ self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
291
+ else:
292
+ self.rotary = None
293
+
294
+
295
+ self.qk_norm = qk_norm
296
+ self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
297
+ if qk_norm:
298
+ if self.qk_norm_share_param_across_head:
299
+ # This is an incorrect implementation kept just for backward compatibility
300
+ self.q_norm = RMSNorm(self.head_dim)
301
+ self.k_norm = RMSNorm(self.head_dim)
302
+ else:
303
+ self.q_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
304
+ self.k_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
305
+
306
+ self.initializer_range = initializer_range
307
+ self.apply(self._initialize_weights)
308
+
309
+ def _initialize_weights(self, module: nn.Module):
310
+ # This will actually be overwritten by outer init.
311
+ if isinstance(module, nn.Linear):
312
+ nn.init.normal_(module.weight, mean=0.0, std=self.initializer_range)
313
+ if module.bias is not None:
314
+ nn.init.zeros_(module.bias)
315
+
316
+ def forward(
317
+ self,
318
+ hidden_states: torch.Tensor,
319
+ attention_mask: Optional[torch.LongTensor] = None,
320
+ past_key_values: Optional[Cache] = None,
321
+ output_attentions: bool = False,
322
+ use_cache: bool = False,
323
+ **kwargs,
324
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
325
+ """
326
+ We assume that during decoding the attention mask is all ones; otherwise this will not work.
327
+ """
328
+ batch_size, q_len, _ = hidden_states.size()
329
+ if use_cache:
330
+ key_shift_state = past_key_values.key_shift_cache[self.layer_idx]
331
+ value_shift_state = past_key_values.value_shift_cache[self.layer_idx]
332
+ else:
333
+ key_shift_state = value_shift_state = None
334
+
335
+ # Shift states are updated in place
336
+ q = self.q_proj(hidden_states)
337
+ if self.use_k_shift:
338
+ k = self.k_proj(hidden_states, key_shift_state)
339
+ else:
340
+ k = self.k_proj(hidden_states)
341
+ if self.use_v_shift:
342
+ v = self.v_proj(hidden_states, value_shift_state)
343
+ else:
344
+ v = self.v_proj(hidden_states)
345
+
346
+ if self.qk_norm and (not self.qk_norm_share_param_across_head):
347
+ q = self.q_norm(q).to(q.dtype)
348
+ k = self.k_norm(k).to(k.dtype)
349
+
350
+ q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
351
+ k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
352
+ v = rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)
353
+
354
+
355
+ if self.qk_norm and (self.qk_norm_share_param_across_head):
356
+ q = self.q_norm(q).to(q.dtype)
357
+ k = self.k_norm(k).to(k.dtype)
358
+
359
+
360
+ seqlen_offset, max_seqlen = 0, q.shape[1]
361
+ if past_key_values is not None:
362
+ seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
363
+ max_seqlen = q.shape[1] + seqlen_offset
364
+
365
+ if attention_mask is not None:
366
+ # to eliminate the offsets of padding tokens
367
+ seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
368
+ max_seqlen = q.shape[1] + max(seqlen_offset)
369
+
370
+ if self.max_position_embeddings is not None:
371
+ max_seqlen = max(max_seqlen, self.max_position_embeddings)
372
+ if self.rotary is not None:
373
+ q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
374
+
375
+ if self.fgate_type == "full":
376
+ fgate_logit = self.fgate_proj(hidden_states)
377
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
378
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
379
+ elif self.fgate_type == "none":
380
+ log_fgate = torch.zeros((batch_size, self.num_heads, q_len), dtype=torch.float32, device=hidden_states.device)
381
+ else:
382
+ assert self.fgate_type in ["fixed", "bias_only"]
383
+ fgate_logit = torch.broadcast_to(self.fgate_bias, (batch_size, q_len, self.num_heads))
384
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
385
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
386
+
387
+ k = rearrange(k, 'b t h d -> b h t d')
388
+ if past_key_values is not None:
389
+ k, v, log_fgate = past_key_values.update(k, v, log_fgate, self.layer_idx)
390
+ # k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
391
+ q = rearrange(q, 'b t h d -> b h t d')
392
+
393
+ if self.num_kv_groups > 1:
394
+ assert False
395
+ k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
396
+ v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
397
+
398
+ # Contains at least one padding token in the sequence
399
+ if attention_mask is not None:
400
+ B, _, T = log_fgate.size()
401
+ assert attention_mask.size() == (B, T), ((B, T), attention_mask.size())
402
+ seq_start = T - attention_mask.sum(dim=-1)
403
+ o = forgetting_attention(
404
+ q, k, v,
405
+ log_fgate,
406
+ head_first=True,
407
+ seq_start=seq_start,
408
+ sm_scale=1 / math.sqrt(self.head_dim),
409
+ )
410
+ o = rearrange(o, "b h t d -> b t h d")
411
+ else:
412
+ o = forgetting_attention(
413
+ q, k, v,
414
+ log_fgate,
415
+ head_first=True,
416
+ sm_scale=1 / math.sqrt(self.head_dim),
417
+ )
418
+ o = rearrange(o, "b h t d -> b t h d")
419
+
420
+ o = o.reshape(batch_size, q_len, self.hidden_size)
421
+
422
+ if self.output_norm is not None:
423
+ o = self.output_norm(o)
424
+
425
+ if self.ogate_proj is not None:
426
+ # ogate = self.ogate act(self.ogate_proj(hidden_states))
427
+ # o = o * ogate
428
+ # ogate = act_gate(self.ogate_proj(hidden_states), o)
429
+ ogate_logit = self.ogate_proj(hidden_states)
430
+ dtype = ogate_logit.dtype
431
+ if self.ogate_act == "silu":
432
+ o = swiglu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
433
+ elif self.ogate_act == "sigmoid":
434
+ o = glu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
435
+ else:
436
+ raise ValueError(f"Unknown ogate act {self.ogate_act}")
437
+ else:
438
+ o = self.o_proj(o)
439
+
440
+ if not output_attentions:
441
+ attentions = None
442
+ else:
443
+ SAVE_HEADS = [0, 1, 2, 3]
444
+ # (B, H, T, T)
445
+ score = q[:, SAVE_HEADS] @ k[:, SAVE_HEADS].mT
446
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
447
+ decay_bias = (log_lambda[:, SAVE_HEADS, :, None] - log_lambda[:, SAVE_HEADS, None, :]).to(torch.bfloat16)
448
+ # normalized_score = torch.softmax(score, dim=-1)
449
+ attentions = (score, decay_bias)
450
+
451
+ return o, attentions, past_key_values
452
+
453
+ def init_shift_state(self, batch_size: int):
454
+ param = next(self.parameters())
455
+ state = dict()
456
+ try:
457
+ dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled("cuda") else torch.float32
458
+ except TypeError:
459
+ # Support legacy torch version
460
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else torch.float32
461
+ if self.use_k_shift:
462
+ state['key_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
463
+ else:
464
+ state['key_shift'] = None
465
+ if self.use_v_shift:
466
+ state['value_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
467
+ else:
468
+ state['value_shift'] = None
469
+ return state
470
+
471
+
472
+ class ForgettingTransformerMLP(nn.Module):
473
+
474
+ def __init__(
475
+ self,
476
+ hidden_size: int,
477
+ hidden_ratio: Optional[float] = None,
478
+ intermediate_size: Optional[int] = None,
479
+ hidden_act: str = 'swish'
480
+ ) -> ForgettingTransformerMLP:
481
+ super().__init__()
482
+
483
+ self.hidden_size = hidden_size
484
+ # the final number of params is `hidden_ratio * hidden_size^2`
485
+ # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
486
+ if hidden_ratio is None:
487
+ hidden_ratio = 4
488
+ if intermediate_size is None:
489
+ intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
490
+ intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
491
+ self.hidden_ratio = hidden_ratio
492
+ self.intermediate_size = intermediate_size
493
+
494
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
495
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
496
+ self.act_fn = ACT2FN[hidden_act]
497
+ self.hidden_act = hidden_act
498
+ assert hidden_act in ["swish", "sigmoid"]
499
+
500
+ def forward(self, x):
501
+ y = self.gate_proj(x)
502
+ gate, y = y.chunk(2, -1)
503
+ # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
504
+ if self.hidden_act == "swish":
505
+ return swiglu_linear(
506
+ gate, y,
507
+ self.down_proj.weight.to(y.dtype),
508
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
509
+ )
510
+ elif self.hidden_act == "sigmoid":
511
+ return glu_linear(
512
+ gate, y,
513
+ self.down_proj.weight.to(y.dtype),
514
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
515
+ )
516
+ else:
517
+ raise ValueError()
518
+
519
+
520
+ class ForgettingTransformerBlock(nn.Module):
521
+ def __init__(self, config, layer_idx: int):
522
+ super().__init__()
523
+ self.hidden_size = config.hidden_size
524
+
525
+ self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
526
+ self.attn = ForgettingAttentionLayer(
527
+ hidden_size=config.hidden_size,
528
+ num_heads=config.num_heads,
529
+ num_kv_heads=config.num_kv_heads,
530
+ window_size=config.window_size,
531
+ max_position_embeddings=config.max_position_embeddings,
532
+ rope_base=config.rope_base,
533
+ use_rope=config.use_rope,
534
+ use_output_gate=config.use_output_gate,
535
+ ogate_act=config.ogate_act,
536
+ fgate_type=config.fgate_type,
537
+ fgate_bias_init=config.fgate_bias_init,
538
+ decay_time_min=config.decay_time_min,
539
+ decay_time_max=config.decay_time_max,
540
+ use_output_norm = config.use_output_norm,
541
+ norm_eps=config.norm_eps,
542
+ qk_norm=config.qk_norm,
543
+ qk_norm_share_param_across_head=config.qk_norm_share_param_across_head,
544
+ use_k_shift=config.use_k_shift,
545
+ use_v_shift=config.use_v_shift,
546
+ initializer_range=config.initializer_range,
547
+ layer_idx=layer_idx
548
+ )
549
+ self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
550
+ self.mlp = ForgettingTransformerMLP(
551
+ hidden_size=config.hidden_size,
552
+ hidden_ratio=config.hidden_ratio,
553
+ intermediate_size=config.intermediate_size,
554
+ hidden_act=config.hidden_act
555
+ )
556
+
557
+ def forward_attn(
558
+ self,
559
+ hidden_states: torch.Tensor,
560
+ attention_mask: Optional[torch.Tensor] = None,
561
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
562
+ output_attentions: Optional[bool] = False,
563
+ use_cache: Optional[bool] = False,
564
+ **kwargs,
565
+ ):
566
+ # residual handled outside of this
567
+ # residual = hidden_states
568
+ hidden_states = self.attn_norm(hidden_states)
569
+ hidden_states, attentions, past_key_values = self.attn(
570
+ hidden_states=hidden_states,
571
+ attention_mask=attention_mask,
572
+ past_key_values=past_key_values,
573
+ use_cache=use_cache,
574
+ output_attentions=output_attentions
575
+ )
576
+ return hidden_states, attentions, past_key_values
577
+
578
+ def forward_mlp(
579
+ self,
580
+ hidden_states: torch.Tensor,
581
+ residual: torch.Tensor,
582
+ ):
583
+ hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
584
+ hidden_states = self.mlp(hidden_states)
585
+ hidden_states = residual + hidden_states
586
+
587
+ return hidden_states
588
+
589
+ def forward(
590
+ self,
591
+ hidden_states: torch.Tensor,
592
+ attention_mask: Optional[torch.Tensor] = None,
593
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
594
+ output_attentions: Optional[bool] = False,
595
+ use_cache: Optional[bool] = False,
596
+ gradient_checkpointing: bool = False
597
+ # **kwargs,
598
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
599
+
600
+ residual = hidden_states
601
+
602
+
603
+ if gradient_checkpointing:
604
+ forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
605
+ forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
606
+ else:
607
+ forward_attn = self.forward_attn
608
+ forward_mlp = self.forward_mlp
609
+
610
+ hidden_states, attentions, past_key_values = forward_attn(
611
+ hidden_states=hidden_states,
612
+ attention_mask=attention_mask,
613
+ past_key_values=past_key_values,
614
+ use_cache=use_cache,
615
+ output_attentions=output_attentions
616
+ )
617
+
618
+ hidden_states = forward_mlp(
619
+ hidden_states,
620
+ residual,
621
+ )
622
+
623
+ outputs = (hidden_states,)
624
+
625
+ if output_attentions:
626
+ outputs += (attentions,)
627
+
628
+ if use_cache:
629
+ outputs += (past_key_values,)
630
+
631
+ return outputs
632
+
633
+
634
+
635
+ class ForgettingTransformerPreTrainedModel(PreTrainedModel):
636
+
637
+ config_class = ForgettingTransformerConfig
638
+ supports_gradient_checkpointing = True
639
+ _no_split_modules = ['ForgettingTransformerBlock']
640
+
641
+ def __init__(self, config, *inputs, **kwargs):
642
+ # Dynamically patch config_class to support remote-code loading
643
+ if hasattr(config, '__class__'):
644
+ config_module = config.__class__.__module__
645
+ if 'transformers_modules' in config_module or config_module == 'configuration_forgetting_transformer':
646
+ self.__class__.config_class = config.__class__
647
+ super().__init__(config, *inputs, **kwargs)
648
+
649
+ def _init_weights(
650
+ self,
651
+ module: nn.Module,
652
+ ):
653
+ # if isinstance(module, (nn.Linear, nn.Conv1d)):
654
+ if isinstance(module, (nn.Linear)):
655
+ # Slightly different from the TF version which uses truncated_normal for initialization
656
+ # cf https://github.com/pytorch/pytorch/pull/5617
657
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
658
+ if module.bias is not None:
659
+ nn.init.zeros_(module.bias)
660
+ elif isinstance(module, nn.Embedding):
661
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
662
+ if module.padding_idx is not None:
663
+ module.weight.data[module.padding_idx].zero_()
664
+
665
+
666
+ class ForgettingTransformerModel(ForgettingTransformerPreTrainedModel):
667
+
668
+ def __init__(self, config):
669
+ super().__init__(config)
670
+ self.padding_idx = config.pad_token_id
671
+ self.vocab_size = config.vocab_size
672
+
673
+ self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
674
+ self.layers = nn.ModuleList([ForgettingTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
675
+ self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
676
+
677
+ self.gradient_checkpointing = False
678
+
679
+ self.post_init()
680
+
681
+ def get_input_embeddings(self):
682
+ return self.embeddings
683
+
684
+ def set_input_embeddings(self, value):
685
+ self.embeddings = value
686
+
687
+ def forward(
688
+ self,
689
+ input_ids: Optional[torch.LongTensor] = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
692
+ inputs_embeds: Optional[torch.FloatTensor] = None,
693
+ use_cache: Optional[bool] = None,
694
+ output_attentions: Optional[bool] = None,
695
+ output_hidden_states: Optional[bool] = None,
696
+ return_dict: Optional[bool] = None
697
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
698
+ # if output_attentions:
699
+ # warnings.warn(
700
+ # "`ForgettingTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
701
+ # )
702
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
703
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
704
+ use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
705
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
706
+
707
+ # retrieve input_ids and inputs_embeds
708
+ if input_ids is not None and inputs_embeds is not None:
709
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
710
+ elif input_ids is None and inputs_embeds is None:
711
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
712
+
713
+ if use_cache:
714
+ # use_legacy_cache = not isinstance(past_key_values, Cache)
715
+ # if use_legacy_cache:
716
+ # past_key_values = FgateDynamicCache.from_legacy_cache(past_key_values)
717
+ if past_key_values is None:
718
+ past_key_values = FgateDynamicCache()
719
+ for layer_idx, layer in enumerate(self.layers):
720
+ shift_state = layer.attn.init_shift_state(
721
+ batch_size=input_ids.size(0),
722
+ )
723
+ past_key_values.update_shift_cache(
724
+ key_shift_state=shift_state["key_shift"],
725
+ value_shift_state=shift_state["value_shift"],
726
+ layer_idx=layer_idx
727
+ )
728
+ else:
729
+ assert isinstance(past_key_values, FgateDynamicCache)
730
+
731
+ if inputs_embeds is None:
732
+ inputs_embeds = self.embeddings(input_ids)
733
+
734
+ # embed positions
735
+ hidden_states = inputs_embeds
736
+
737
+ if self.gradient_checkpointing and self.training:
738
+ if use_cache:
739
+ logger.warning_once(
740
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
741
+ )
742
+ use_cache = False
743
+
744
+ all_hidden_states = () if output_hidden_states else None
745
+ all_attns = {} if output_attentions else None
746
+ next_decoder_cache = None
747
+
748
+ for layer_id, layer in enumerate(self.layers):
749
+ if output_hidden_states:
750
+ all_hidden_states += (hidden_states,)
751
+
752
+ layer_outputs = layer(
753
+ hidden_states,
754
+ attention_mask=attention_mask,
755
+ past_key_values=past_key_values,
756
+ output_attentions=output_attentions,
757
+ use_cache=use_cache,
758
+ gradient_checkpointing=self.gradient_checkpointing and self.training
759
+ )
760
+
761
+ hidden_states = layer_outputs[0]
762
+
763
+ if use_cache:
764
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
765
+
766
+ if output_attentions:
767
+ OUTPUT_ATTN_LAYERS = [0, 7, 15, 23]
768
+ if layer_id in OUTPUT_ATTN_LAYERS:
769
+ # all_attns += (layer_outputs[1],)
770
+ all_attns[layer_id] = layer_outputs[1]
771
+
772
+ hidden_states = self.norm(hidden_states)
773
+
774
+ # add hidden states from the last decoder layer
775
+ if output_hidden_states:
776
+ all_hidden_states += (hidden_states,)
777
+
778
+ next_cache = None
779
+ if use_cache:
780
+ # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
781
+ next_cache = next_decoder_cache
782
+ if not return_dict:
783
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
784
+
785
+ return BaseModelOutputWithPast(
786
+ last_hidden_state=hidden_states,
787
+ past_key_values=next_cache,
788
+ hidden_states=all_hidden_states,
789
+ attentions=all_attns
790
+ )
791
+
792
+
793
+ class ForgettingTransformerForCausalLM(ForgettingTransformerPreTrainedModel):
794
+ _tied_weights_keys = ["lm_head.weight"]
795
+
796
+ def __init__(self, config):
797
+ super().__init__(config)
798
+ self.model = ForgettingTransformerModel(config)
799
+ self.vocab_size = config.vocab_size
800
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
801
+
802
+ # Initialize weights and apply final processing
803
+ self.post_init()
804
+
805
+ def get_input_embeddings(self):
806
+ return self.model.embeddings
807
+
808
+ def set_input_embeddings(self, value):
809
+ self.model.embeddings = value
810
+
811
+ def get_output_embeddings(self):
812
+ return self.lm_head
813
+
814
+ def set_output_embeddings(self, new_embeddings):
815
+ self.lm_head = new_embeddings
816
+
817
+ def set_decoder(self, decoder):
818
+ self.model = decoder
819
+
820
+ def get_decoder(self):
821
+ return self.model
822
+
823
+ def prepare_inputs_for_generation(
824
+ self,
825
+ input_ids: torch.LongTensor = None,
826
+ past_key_values: Optional[torch.Tensor] = None,
827
+ attention_mask: Optional[torch.Tensor] = None,
828
+ inputs_embeds: Optional[torch.Tensor] = None,
829
+ **kwargs
830
+ ):
831
+ # Only use the last token of `input_ids` if `past_key_values` is passed along.
832
+ if past_key_values is not None:
833
+ input_ids = input_ids[:, -1:]
834
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
835
+ if inputs_embeds is not None and past_key_values is None:
836
+ model_inputs = {'inputs_embeds': inputs_embeds}
837
+ else:
838
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
839
+ # recompiles graphs as the stride of the inputs is a guard.
840
+ # Ref: https://github.com/huggingface/transformers/pull/29114
841
+ # TODO: use `next_tokens` directly instead.
842
+ model_inputs = {'input_ids': input_ids.contiguous()}
843
+
844
+ model_inputs.update({
845
+ 'past_key_values': past_key_values,
846
+ 'use_cache': kwargs.get('use_cache'),
847
+ 'attention_mask': attention_mask,
848
+ })
849
+ return model_inputs
850
+
851
+ def forward(
852
+ self,
853
+ input_ids: torch.LongTensor = None,
854
+ attention_mask: Optional[torch.Tensor] = None,
855
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
856
+ inputs_embeds: Optional[torch.FloatTensor] = None,
857
+ labels: Optional[torch.LongTensor] = None,
858
+ use_cache: Optional[bool] = None,
859
+ output_attentions: Optional[bool] = None,
860
+ output_hidden_states: Optional[bool] = None,
861
+ return_dict: Optional[bool] = None,
862
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
863
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
864
+ output_hidden_states = (
865
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
866
+ )
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+
869
+ outputs = self.model(
870
+ input_ids=input_ids,
871
+ attention_mask=attention_mask,
872
+ past_key_values=past_key_values,
873
+ inputs_embeds=inputs_embeds,
874
+ use_cache=use_cache,
875
+ output_attentions=output_attentions,
876
+ output_hidden_states=output_hidden_states,
877
+ return_dict=return_dict
878
+ )
879
+
880
+ hidden_states = outputs[0]
881
+
882
+ loss = None
883
+ if labels is not None:
884
+ if self.config.fuse_cross_entropy:
885
+ loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
886
+ else:
887
+ loss_fct = nn.CrossEntropyLoss(reduction='none')
888
+ logits = self.lm_head(hidden_states)
889
+ # Enable model parallelism
890
+ labels = labels.to(logits.device)
891
+ # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
892
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
893
+ loss = loss.view(*labels.size())
894
+ del logits
895
+ logits = None  # logits are not returned when labels are provided
896
+ else:
897
+ logits = self.lm_head(hidden_states)
898
+
899
+ if not return_dict:
900
+ raise NotImplementedError("tuple outputs are not supported; pass return_dict=True")
901
+ # Unreachable fallback kept for reference:
902
+ # output = (logits,) + outputs[1:]; return (loss,) + output if loss is not None else output
903
+
904
+ return CausalLMOutputWithPast(
905
+ loss=loss,
906
+ logits=logits,
907
+ past_key_values=outputs.past_key_values,
908
+ hidden_states=outputs.hidden_states,
909
+ attentions=outputs.attentions,
910
+ )
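Reviewer note: since the checkpoint file above mirrors the remote-code entry point, a minimal end-to-end usage sketch may help. It assumes the files in this commit are served from a Hub repository with `trust_remote_code=True`; the repo id, dtype, prompt, and the presence of a tokenizer are placeholders/assumptions, not part of the commit.

```python
# Minimal usage sketch (assumptions: repo id is hypothetical; a tokenizer is
# present in the repo; CUDA is available for the fused kernels).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/forgetting-transformer"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,      # loads ForgettingTransformerForCausalLM above
    torch_dtype=torch.bfloat16,
).cuda().eval()

inputs = tokenizer("The Forgetting Transformer", return_tensors="pt").to("cuda")
# use_cache=True routes decoding through FgateDynamicCache internally.
out = model.generate(**inputs, max_new_tokens=32, use_cache=True)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```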
__init__.py ADDED
@@ -0,0 +1 @@
1
+ # for HF remote code
__pycache__/__init__.cpython-310.pyc ADDED
Binary file (612 Bytes).
 
__pycache__/configuration_forgetting_transformer.cpython-310.pyc ADDED
Binary file (2.58 kB).
 
__pycache__/fgate_cache.cpython-310.pyc ADDED
Binary file (6.38 kB).
 
__pycache__/glu_linear.cpython-310.pyc ADDED
Binary file (2.35 kB).
 
__pycache__/modeling_forgetting_transformer.cpython-310.pyc ADDED
Binary file (24 kB).
 
__pycache__/token_shift.cpython-310.pyc ADDED
Binary file (6.37 kB).
 
configuration_forgetting_transformer.py ADDED
@@ -0,0 +1,84 @@
1
+ # -*- coding: utf-8 -*-
2
+ from typing import Optional
3
+ from transformers.configuration_utils import PretrainedConfig
4
+
5
+ class ForgettingTransformerConfig(PretrainedConfig):
6
+ model_type = 'forgetting_transformer'
7
+ keys_to_ignore_at_inference = ['past_key_values']
8
+
9
+ def __init__(
10
+ self,
11
+ vocab_size: int = 32000,
12
+ hidden_size: int = 2048,
13
+ hidden_ratio: Optional[float] = 4,
14
+ intermediate_size: Optional[int] = None,
15
+ num_hidden_layers: int = 24,
16
+ num_heads: int = 32,
17
+ num_kv_heads: int = None,
18
+ hidden_act: str = "swish",
19
+ window_size: Optional[int] = None,
20
+ max_position_embeddings: int = 2048,
21
+ initializer_range: float = 0.02,
22
+ elementwise_affine: Optional[bool] = True,
23
+ norm_eps: float = 1e-6,
24
+ use_cache: bool = True,
25
+ pad_token_id: int = None,
26
+ bos_token_id: int = 1,
27
+ eos_token_id: int = 2,
28
+ tie_word_embeddings: bool = False,
29
+ attention_bias: bool = False,
30
+ fuse_norm: bool = True,
31
+ fuse_cross_entropy: bool = True,
32
+ rope_base: float = 500000.0,
33
+ use_rope: bool = False,
34
+ use_output_gate: bool = False,
35
+ ogate_act: str = "sigmoid",
36
+ fgate_type: str = "full",
37
+ fgate_bias_init: bool = False,
38
+ decay_time_min: Optional[float] = None,
39
+ decay_time_max: Optional[float] = None,
40
+ use_output_norm: bool = False,
41
+ qk_norm: bool = False,
42
+ qk_norm_share_param_across_head: bool = False,
43
+ use_k_shift: bool = False,
44
+ use_v_shift: bool = False,
45
+ **kwargs,
46
+ ):
47
+ self.vocab_size = vocab_size
48
+ self.hidden_size = hidden_size
49
+ self.hidden_ratio = hidden_ratio
50
+ self.intermediate_size = intermediate_size
51
+ self.num_hidden_layers = num_hidden_layers
52
+ self.num_heads = num_heads
53
+ self.num_kv_heads = num_kv_heads
54
+ self.window_size = window_size
55
+ self.max_position_embeddings = max_position_embeddings
56
+ self.hidden_act = hidden_act
57
+ self.initializer_range = initializer_range
58
+ self.elementwise_affine = elementwise_affine
59
+ self.norm_eps = norm_eps
60
+ self.use_cache = use_cache
61
+ self.attention_bias = attention_bias
62
+ self.fuse_cross_entropy = fuse_cross_entropy
63
+ self.fuse_norm = fuse_norm
64
+ self.rope_base = rope_base
65
+ self.use_rope = use_rope
66
+ self.use_output_gate = use_output_gate
67
+ self.ogate_act = ogate_act
68
+ self.fgate_type = fgate_type
69
+ self.fgate_bias_init = fgate_bias_init
70
+ self.decay_time_min = decay_time_min
71
+ self.decay_time_max = decay_time_max
72
+ self.use_output_norm = use_output_norm
73
+ self.qk_norm = qk_norm
74
+ self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
75
+ self.use_k_shift = use_k_shift
76
+ self.use_v_shift = use_v_shift
77
+
78
+ super().__init__(
79
+ pad_token_id=pad_token_id,
80
+ bos_token_id=bos_token_id,
81
+ eos_token_id=eos_token_id,
82
+ tie_word_embeddings=tie_word_embeddings,
83
+ **kwargs,
84
+ )
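Reviewer note: a small sketch of constructing the config above directly; the hyperparameter values below are illustrative, not the shipped checkpoint's, and the flat import path assumes the file is on `sys.path`.

```python
# Illustrative config construction (values are examples only).
from configuration_forgetting_transformer import ForgettingTransformerConfig

config = ForgettingTransformerConfig(
    vocab_size=32000,
    hidden_size=1536,
    num_hidden_layers=24,
    num_heads=24,
    fgate_type="full",     # data-dependent forget gate (the default type)
    use_output_gate=True,
    use_output_norm=True,
    qk_norm=True,
    use_rope=False,        # default: no RoPE
)
assert config.model_type == "forgetting_transformer"
```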
fgate_cache.py ADDED
@@ -0,0 +1,143 @@
1
+ from typing import List, Tuple, Optional, Any, Dict
2
+ import torch
3
+
4
+ class FgateDynamicCache:
5
+ """
6
+ A cache that grows dynamically as more tokens are generated.
7
+ Custom cache for Forgetting Transformer that does not inherit from transformers.Cache.
8
+ """
9
+
10
+ def __init__(self, num_hidden_layers: Optional[int] = None) -> None:
11
+ self.key_cache: List[torch.Tensor] = []
12
+ self.value_cache: List[torch.Tensor] = []
13
+ self.log_fgate_cache: List[torch.Tensor] = []
14
+ self.key_shift_cache: List[torch.Tensor] = []
15
+ self.value_shift_cache: List[torch.Tensor] = []
16
+ self._seen_tokens = 0
17
+
18
+ def update_shift_cache(
19
+ self,
20
+ key_shift_state: torch.Tensor,
21
+ value_shift_state: torch.Tensor,
22
+ layer_idx,
23
+ ):
24
+ assert layer_idx == len(self.key_shift_cache) == len(self.value_shift_cache)
25
+ self.key_shift_cache.append(key_shift_state)
26
+ self.value_shift_cache.append(value_shift_state)
27
+
28
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
29
+ if layer_idx < len(self):
30
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
31
+ else:
32
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
33
+
34
+ def __iter__(self):
35
+ for layer_idx in range(len(self)):
36
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
37
+
38
+ def __len__(self):
39
+ return len(self.key_cache)
40
+
41
+ def update(
42
+ self,
43
+ key_states: torch.Tensor,
44
+ value_states: torch.Tensor,
45
+ log_fgate_states: torch.Tensor,
46
+ layer_idx: int,
47
+ cache_kwargs: Optional[Dict[str, Any]] = None,
48
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
49
+ assert log_fgate_states.ndim == 3, f"log_fgate must be (B, H, T), but got {log_fgate_states.size()}"
50
+ if layer_idx == 0:
51
+ self._seen_tokens += key_states.shape[-2]
52
+
53
+ if len(self.key_cache) <= layer_idx:
54
+ self.key_cache.append(key_states)
55
+ self.value_cache.append(value_states)
56
+ self.log_fgate_cache.append(log_fgate_states)
57
+ else:
58
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
59
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
60
+ self.log_fgate_cache[layer_idx] = torch.cat([self.log_fgate_cache[layer_idx], log_fgate_states], dim=-1)
61
+
62
+ return self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]
63
+
64
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
65
+ if len(self.key_cache) <= layer_idx:
66
+ return 0
67
+ return self.key_cache[layer_idx].shape[-2]
68
+
69
+ def get_max_length(self) -> Optional[int]:
70
+ return None
71
+
72
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], ...]:
73
+ legacy_cache = ()
74
+ for layer_idx in range(len(self)):
75
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]),)
76
+ return legacy_cache
77
+
78
+ @classmethod
79
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_layers: Optional[int] = None) -> "FgateDynamicCache":
80
+ """
81
+ Converts a cache in the legacy cache format into an equivalent FgateDynamicCache.
82
+
83
+ Args:
84
+ past_key_values: Optional legacy cache format
85
+ num_layers: Not used in this implementation
86
+
87
+ Returns:
88
+ FgateDynamicCache instance
89
+ """
90
+ cache = cls()
91
+
92
+ if past_key_values is not None:
93
+ for layer_idx in range(len(past_key_values)):
94
+ key_states, value_states, log_fgate_states = past_key_values[layer_idx]
95
+ cache.update(key_states, value_states, log_fgate_states, layer_idx)
96
+
97
+ return cache
98
+
99
+ def crop(self, max_length: int):
100
+ if max_length < 0:
101
+ max_length = self.get_seq_length() - abs(max_length)
102
+
103
+ if self.get_seq_length() <= max_length:
104
+ return
105
+
106
+ self._seen_tokens = max_length
107
+ for idx in range(len(self.key_cache)):
108
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
109
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
110
+ self.log_fgate_cache[idx] = self.log_fgate_cache[idx][..., :max_length]
111
+
112
+ def batch_split(self, full_batch_size: int, split_size: int) -> List["FgateDynamicCache"]:
113
+ out = []
114
+ for i in range(0, full_batch_size, split_size):
115
+ current_split = FgateDynamicCache()
116
+ current_split._seen_tokens = self._seen_tokens
117
+ current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache]
118
+ current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache]
119
+ current_split.log_fgate_cache = [tensor[i : i + split_size] for tensor in self.log_fgate_cache]
120
+ out.append(current_split)
121
+ return out
122
+
123
+ @classmethod
124
+ def from_batch_splits(cls, splits: List["FgateDynamicCache"]) -> "FgateDynamicCache":
125
+ cache = cls()
126
+ for idx in range(len(splits[0])):
127
+ layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
128
+ layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
129
+ layer_log_fgates = torch.cat([current.log_fgate_cache[idx] for current in splits], dim=0)
130
+ cache.update(layer_keys, layer_values, layer_log_fgates, idx)
131
+ return cache
132
+
133
+ def batch_repeat_interleave(self, repeats: int):
134
+ for layer_idx in range(len(self)):
135
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
136
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
137
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx].repeat_interleave(repeats, dim=0)
138
+
139
+ def batch_select_indices(self, indices: torch.Tensor):
140
+ for layer_idx in range(len(self)):
141
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
142
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
143
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx][indices, ...]
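Reviewer note: a self-contained sketch of the per-layer update contract above. Shapes follow the assert in `update()`: keys/values are `(B, H, T, D)` and the log forget gates are `(B, H, T)`; the sizes and the flat import path are assumptions for illustration.

```python
# Sketch of FgateDynamicCache usage: one update() per layer per forward step.
import torch
from fgate_cache import FgateDynamicCache  # assuming the module is importable as-is

B, H, T, D = 2, 4, 8, 16
cache = FgateDynamicCache()
for layer_idx in range(3):
    k = torch.randn(B, H, T, D)
    v = torch.randn(B, H, T, D)
    log_fgate = torch.nn.functional.logsigmoid(torch.randn(B, H, T))  # (B, H, T)
    cache.update(k, v, log_fgate, layer_idx)

# A second update on layer 0 concatenates along the time axis.
cache.update(torch.randn(B, H, 1, D), torch.randn(B, H, 1, D),
             torch.zeros(B, H, 1), layer_idx=0)
assert cache.get_seq_length(0) == T + 1 and len(cache) == 3
```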
fgate_cache.py.backup ADDED
@@ -0,0 +1,203 @@
1
+ from typing import List, Tuple, Optional, Any, Dict
2
+ import torch
3
+ from transformers.cache_utils import Cache
4
+
5
+ class FgateDynamicCache(Cache):
6
+ """
7
+ A cache that grows dynamically as more tokens are generated. This is the default for generative models.
8
+
9
+ It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
10
+ `[batch_size, num_heads, seq_len, head_dim]`.
11
+
12
+ Example:
13
+
14
+ ```python
15
+ >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
16
+
17
+ >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
18
+ >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
19
+
20
+ >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")
21
+
22
+ >>> # Prepare a cache class and pass it to model's forward
23
+ >>> past_key_values = DynamicCache()
24
+ >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
25
+ >>> outputs.past_key_values # access cache filled with key/values from generation
26
+ DynamicCache()
27
+ ```
28
+ """
29
+
30
+ def __init__(self) -> None:
31
+ super().__init__()
32
+ self.key_cache: List[torch.Tensor] = []
33
+ self.value_cache: List[torch.Tensor] = []
34
+ self.log_fgate_cache: List[torch.Tensor] = []
35
+
36
+ self.key_shift_cache: List[torch.Tensor] = []
37
+ self.value_shift_cache: List[torch.Tensor] = []
38
+
39
+ self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen
40
+
41
+ def update_shift_cache(
42
+ self,
43
+ key_shift_state: torch.Tensor,
44
+ value_shift_state: torch.Tensor,
45
+ layer_idx,
46
+ ):
47
+ assert layer_idx == len(self.key_shift_cache) == len(self.value_shift_cache)
48
+ self.key_shift_cache.append(key_shift_state)
49
+ self.value_shift_cache.append(value_shift_state)
50
+
51
+
52
+ def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
53
+ """
54
+ Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
55
+ sequence length.
56
+ """
57
+ if layer_idx < len(self):
58
+ return (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
59
+ else:
60
+ raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")
61
+
62
+ def __iter__(self):
63
+ """
64
+ Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
65
+ keys and values
66
+ """
67
+ for layer_idx in range(len(self)):
68
+ yield (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
69
+
70
+ def __len__(self):
71
+ """
72
+ Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
73
+ to the number of layers in the model.
74
+ """
75
+ return len(self.key_cache)
76
+
77
+ def update(
78
+ self,
79
+ key_states: torch.Tensor,
80
+ value_states: torch.Tensor,
81
+ log_fgate_states: torch.Tensor,
82
+ layer_idx: int,
83
+ cache_kwargs: Optional[Dict[str, Any]] = None,
84
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
85
+ """
86
+ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
87
+
88
+ Parameters:
89
+ key_states (`torch.Tensor`):
90
+ The new key states to cache.
91
+ value_states (`torch.Tensor`):
92
+ The new value states to cache.
93
+ layer_idx (`int`):
94
+ The index of the layer to cache the states for.
95
+ cache_kwargs (`Dict[str, Any]`, `optional`):
96
+ Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
97
+
98
+ Return:
99
+ A tuple containing the updated key and value states.
100
+ """
101
+ assert log_fgate_states.ndim == 3, f"log_fgate must be (B, H, T), but got {log_fgate_states.size()}"
102
+ # Update the number of seen tokens
103
+ if layer_idx == 0:
104
+ self._seen_tokens += key_states.shape[-2]
105
+
106
+ # Update the cache
107
+ if len(self.key_cache) <= layer_idx:
108
+ self.key_cache.append(key_states)
109
+ self.value_cache.append(value_states)
110
+ self.log_fgate_cache.append(log_fgate_states)
111
+ else:
112
+ self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
113
+ self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
114
+ self.log_fgate_cache[layer_idx] = torch.cat([self.log_fgate_cache[layer_idx], log_fgate_states], dim=-1)
115
+
116
+ return self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]
117
+
118
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
119
+ """Returns the sequence length of the cached states. A layer index can be optionally passed."""
120
+ # TODO: deprecate this function in favor of `cache_position`
121
+ if len(self.key_cache) <= layer_idx:
122
+ return 0
123
+ return self.key_cache[layer_idx].shape[-2]
124
+
125
+ def get_max_length(self) -> Optional[int]:
126
+ """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
127
+ return None
128
+
129
+ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
130
+ """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for
131
+ backward compatibility."""
132
+ legacy_cache = ()
133
+ for layer_idx in range(len(self)):
134
+ legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]),)
135
+ return legacy_cache
136
+
137
+ @classmethod
138
+ def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_layers: Optional[int] = None) -> "DynamicCache":
139
+ """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for
140
+ backward compatibility."""
141
+ raise NotImplementedError  # everything below is unreachable; kept from an earlier revision
142
+ assert num_layers is not None
143
+ cache = cls(num_layers)
144
+ if past_key_values is not None:
145
+ for layer_idx in range(len(past_key_values)):
146
+ key_states, value_states, log_fgate_states = past_key_values[layer_idx]
147
+ cache.update(key_states, value_states, log_fgate_states, layer_idx)
148
+ return cache
149
+
150
+ def crop(self, max_length: int):
151
+ """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
152
+ negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
153
+ # In case it is negative
154
+ if max_length < 0:
155
+ max_length = self.get_seq_length() - abs(max_length)
156
+
157
+ if self.get_seq_length() <= max_length:
158
+ return
159
+
160
+ self._seen_tokens = max_length
161
+ for idx in range(len(self.key_cache)):
162
+ self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
163
+ self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
164
+ self.log_fgate_cache[idx] = self.log_fgate_cache[idx][..., :max_length]
165
+
166
+ def batch_split(self, full_batch_size: int, split_size: int) -> List["DynamicCache"]:
167
+ """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
168
+ `_split_model_inputs()` in `generation.utils`"""
169
+ out = []
170
+ for i in range(0, full_batch_size, split_size):
171
+ current_split = DynamicCache()
172
+ current_split._seen_tokens = self._seen_tokens
173
+ current_split.key_cache = [tensor[i : i + split_size] for tensor in self.key_cache]
174
+ current_split.value_cache = [tensor[i : i + split_size] for tensor in self.value_cache]
175
+ current_split.log_fgate_cache = [tensor[i : i + split_size] for tensor in self.log_fgate_cache]
176
+ out.append(current_split)
177
+ return out
178
+
179
+ @classmethod
180
+ def from_batch_splits(cls, splits: List["DynamicCache"]) -> "DynamicCache":
181
+ """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
182
+ `generation.utils`"""
183
+ cache = cls()
184
+ for idx in range(len(splits[0])):
185
+ layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
186
+ layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
187
+ layer_log_fgates = torch.cat([current.log_fgate_cache[idx] for current in splits], dim=0)
188
+ cache.update(layer_keys, layer_values, layer_log_fgates, idx)
189
+ return cache
190
+
191
+ def batch_repeat_interleave(self, repeats: int):
192
+ """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
193
+ for layer_idx in range(len(self)):
194
+ self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
195
+ self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
196
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx].repeat_interleave(repeats, dim=0)
197
+
198
+ def batch_select_indices(self, indices: torch.Tensor):
199
+ """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
200
+ for layer_idx in range(len(self)):
201
+ self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
202
+ self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
203
+ self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx][indices, ...]
glu_linear.py ADDED
@@ -0,0 +1,61 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+
5
+ glu_fwd_codestring = """
6
+ template <typename T> T glu_fwd(T x, T y) {
7
+ return float(y) / (1.0f + ::exp(-float(x)));
8
+ }
9
+ """
10
+ glu_bwd_codestring = """
11
+ template <typename T> T glu_bwd(T x, T y, T g, T& dx, T& dy) {
12
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
13
+ dx = x_sigmoid * (1.0f - x_sigmoid) * float(g) * float(y);
14
+ dy = x_sigmoid * float(g);
15
+ }
16
+ """
17
+
18
+ glu_bwd_with_output_codestring = """
19
+ template <typename T> T glu_bwd_with_output(T x, T y, T g, T& dx, T& dy, T& z) {
20
+ float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
21
+ dx = x_sigmoid * (1.0f - x_sigmoid) * float(g) * float(y);
22
+ dy = x_sigmoid * float(g);
23
+ z = x_sigmoid * float(y);
24
+ }
25
+ """
26
+
27
+ glu_fwd = torch.cuda.jiterator._create_jit_fn(glu_fwd_codestring)
28
+ glu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(glu_bwd_codestring, num_outputs=2)
29
+ glu_bwd_with_output = torch.cuda.jiterator._create_multi_output_jit_fn(glu_bwd_with_output_codestring, num_outputs=3)
30
+
31
+
32
+ class GLULinearFunction(torch.autograd.Function):
33
+ r"""
34
+ Gated Linear Unit (GLU) function followed by a linear transformation.
35
+
36
+ .. math::
37
+ \text{GLULinear}(x, y, W, b) = (\sigma(x) \odot y) W^\top + b
38
+
39
+ This simple wrapper discards the intermediate result of GLU(x, y) and recomputes it in the backward pass to save memory.
40
+ """
41
+
42
+ @staticmethod
43
+ def forward(ctx, x, y, weight, bias):
44
+ z = glu_fwd(x, y)
45
+ out = F.linear(z.to(weight.dtype), weight, bias)
46
+ # We don't store z; it is recomputed in the backward pass to save memory
47
+ ctx.save_for_backward(x, y, weight)
48
+ ctx.linear_bias_is_none = bias is None
49
+ return out
50
+
51
+ @staticmethod
52
+ def backward(ctx, dout, *args):
53
+ x, y, weight = ctx.saved_tensors
54
+ dout = dout.reshape(-1, dout.shape[-1])
55
+ dz = F.linear(dout, weight.t()).view_as(x)
56
+ dx, dy, z = glu_bwd_with_output(x, y, dz)
57
+ dlinear_weight = torch.einsum("bo,bi->oi", dout, z.reshape(-1, z.shape[-1]))
58
+ dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
59
+ return dx, dy, dlinear_weight, dlinear_bias
60
+
61
+ glu_linear = GLULinearFunction.apply
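Reviewer note: the jiterator kernels above are CUDA-only, so here is an eager-mode reference of what `glu_linear` computes, useful as a correctness check. The shapes, tolerance, and flat import path are assumptions for illustration.

```python
# Eager reference for glu_linear: out = F.linear(sigmoid(x) * y, W, b).
import torch
import torch.nn.functional as F
from glu_linear import glu_linear  # assuming the module is importable as-is

def glu_linear_ref(x, y, weight, bias=None):
    z = torch.sigmoid(x) * y          # GLU: gate y by sigmoid(x)
    return F.linear(z.to(weight.dtype), weight, bias)

if torch.cuda.is_available():
    x = torch.randn(4, 32, device="cuda")
    y = torch.randn(4, 32, device="cuda")
    w = torch.randn(16, 32, device="cuda")
    assert torch.allclose(glu_linear(x, y, w, None), glu_linear_ref(x, y, w), atol=1e-5)
```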
modeling_forgetting_transformer.py ADDED
@@ -0,0 +1,910 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from __future__ import annotations
4
+
5
+ import math
6
+ import warnings
7
+ from typing import List, Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.utils.checkpoint
12
+ from transformers.activations import ACT2FN
13
+ from transformers.cache_utils import Cache
14
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
15
+ CausalLMOutputWithPast)
16
+ from transformers.modeling_utils import PreTrainedModel
17
+ from transformers.utils import logging
18
+
19
+ # from fla.layers.attn import Attention
20
+ from fla.modules import FusedCrossEntropyLoss, RMSNorm
21
+ from fla.modules.layernorm import group_norm_fn
22
+ from fla.modules.activations import swiglu_linear
23
+
24
+ from fla.modules import RotaryEmbedding
25
+ from einops import rearrange
26
+
27
+ # Dynamically import the config class to support both local and HuggingFace Hub loading
28
+ try:
29
+ from .configuration_forgetting_transformer import ForgettingTransformerConfig
30
+ except (ImportError, ValueError):
31
+ try:
32
+ from configuration_forgetting_transformer import ForgettingTransformerConfig
33
+ except ImportError:
34
+ from forgetting_transformer.model.forgetting_transformer.configuration_forgetting_transformer import ForgettingTransformerConfig
35
+
36
+ from forgetting_transformer.ops.forgetting_attention_std import forgetting_attention_std as forgetting_attention
37
+ from .fgate_cache import FgateDynamicCache
38
+ from .glu_linear import glu_linear
39
+ from .token_shift import token_shift
40
+
41
+ from functools import partial
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
+ class ShiftLinear(nn.Module):
47
+
48
+ def __init__(
49
+ self,
50
+ input_dim: int,
51
+ output_dim: int,
52
+ num_heads: int,
53
+ bias: bool,
54
+ shift_bias: bool = False
55
+ ):
56
+ super().__init__()
57
+
58
+ self.input_dim = input_dim
59
+ self.output_dim = output_dim
60
+ self.num_heads = num_heads
61
+ assert self.output_dim % self.num_heads == 0
62
+
63
+ self.linear = nn.Linear(input_dim, output_dim, bias=bias)
64
+ self.shift_proj = nn.Linear(input_dim, num_heads, bias=shift_bias)
65
+
66
+ def __repr__(self) -> str:
67
+ s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim})"
68
+ return s
69
+
70
+ def forward(self, x: torch.Tensor, shift_state: Optional[torch.Tensor]) -> torch.Tensor:
71
+ assert x.ndim == 3, "Input must be (B, T, D)"
72
+ B, T, D = x.size()
73
+ out = self.linear(x)
74
+ # alpha: (B, T, H)
75
+ alpha = torch.sigmoid(self.shift_proj(x).float()).float()
76
+ # left, right, top, bottom (B, T=H, D=W)
77
+ # out_prev = nn.functional.pad(out, (0, 0, 1, -1))
78
+ # out_prev = torch.roll(out, shifts=1, dims=1)
79
+
80
+ out_per_head = rearrange(out, 'b t (h d) -> b t h d', h=self.num_heads)
81
+ if T > 1:
82
+ # TODO: note that in this case the shift cache is not used
83
+ result_per_head = token_shift(out_per_head, alpha, 1.0 - alpha)
84
+ else:
85
+ shift_state_per_head = rearrange(shift_state, 'b (h d) -> b 1 h d', h=self.num_heads)
86
+ result_per_head = (alpha[..., None] * shift_state_per_head + (1 - alpha[..., None]) * out_per_head)
87
+
88
+ result_per_head = result_per_head.to(out.dtype)
89
+
90
+ if shift_state is not None:
91
+ shift_state.copy_(out[:, -1, :])
92
+
93
+ result = rearrange(result_per_head, 'b t h d -> b t (h d)', h=self.num_heads)
94
+ return result
95
+
96
+ class GroupRMSNorm(nn.Module):
97
+ def __init__(
98
+ self,
99
+ num_groups: int,
100
+ hidden_size: int,
101
+ elementwise_affine: bool = True,
102
+ bias: bool = False,
103
+ eps: float = 1e-5
104
+ ) -> GroupRMSNorm:
105
+ super().__init__()
106
+
107
+ if hidden_size % num_groups != 0:
108
+ raise ValueError('hidden_size must be divisible by num_groups')
109
+
110
+ self.num_groups = num_groups
111
+ self.hidden_size = hidden_size
112
+ self.elementwise_affine = elementwise_affine
113
+ self.eps = eps
114
+
115
+ self.register_parameter("weight", None)
116
+ self.register_parameter("bias", None)
117
+ if elementwise_affine:
118
+ self.weight = nn.Parameter(torch.ones(hidden_size))
119
+ if bias:
120
+ self.bias = nn.Parameter(torch.zeros(hidden_size))
121
+
122
+ def __repr__(self) -> str:
123
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
124
+ if not self.elementwise_affine:
125
+ s += f", elementwise_affine={self.elementwise_affine}"
126
+ s += f", eps={self.eps}"
127
+ s += ")"
128
+ return s
129
+
130
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
131
+ return group_norm_fn(
132
+ x,
133
+ self.weight,
134
+ self.bias,
135
+ residual=residual,
136
+ eps=self.eps,
137
+ prenorm=prenorm,
138
+ residual_in_fp32=residual_in_fp32,
139
+ is_rms_norm=True,
140
+ num_groups=self.num_groups
141
+ )
142
+
143
+ class ForgettingAttentionLayer(nn.Module):
144
+
145
+ def __init__(
146
+ self,
147
+ hidden_size: int = 2048,
148
+ num_heads: int = 32,
149
+ num_kv_heads: Optional[int] = None,
150
+ window_size: Optional[int] = None,
151
+ max_position_embeddings: Optional[int] = None,
152
+ use_rope: bool = False,
153
+ rope_base: float = 500000.0,
154
+ use_output_gate: bool = False,
155
+ ogate_act: str = "sigmoid",
156
+ fgate_type: str = "full",
157
+ fgate_bias_init: bool = False,
158
+ decay_time_min: Optional[float] = None,
159
+ decay_time_max: Optional[float] = None,
160
+ use_output_norm: bool = False,
161
+ norm_eps: float = 1e-6,
162
+ qk_norm: bool = False,
163
+ qk_norm_share_param_across_head: bool = False,
164
+ use_k_shift: bool = False,
165
+ use_v_shift: bool = False,
166
+ initializer_range: float = 0.02,
167
+ layer_idx: int = None
168
+ ):
169
+ """
170
+ Forgetting Attention layer.
171
+
172
+ Arguments:
173
+ - hidden_size: Input dimension and qkv dimension
174
+ - num_heads: Number of heads
175
+ - num_kv_heads: Not used. Should be None
176
+ - window_size: Not used. Should be None
177
+ - max_position_embeddings: Not used. Should be None
178
+ - use_rope: Whether to use RoPE. Default is False
179
+ - rope_base: the theta hyperparameter in RoPE. This has no effect if
180
+ use_rope=False
181
+ - use_output_gate: Whether to use output gates. Note that using output gates
182
+ introduces extra parameters and you may want to reduce parameters from
183
+ other components (e.g., MLPs)
184
+ - ogate_act: Activation for the output gate. Either "sigmoid" or "silu"
185
+ - fgate_type: Forget gate type. The following are supported:
186
+ - "full": The default data-dependent forget gate
187
+ - "bias_only": The data-independent forget gate
188
+ - "fixed": Forget gates with fixed values
189
+ - "none": Not using forget gates. Equivalent to forget gates with all
190
+ ones.
191
+ - fgate_bias_init: Whether to use special initialization for the bias terms in
192
+ the forget gate. This should only be used with fgate types in
193
+ ["bias_only", "fixed"].
194
+ - decay_time_min: T_min for the forget gate bias initialization. See paper
195
+ for details.
196
+ - decay_time_max: T_max for the forget gate bias initialization. See paper
197
+ for details.
198
+ - use_output_norm: Whether to use output normalization.
199
+ - norm_eps: Epsilon for the RMSNorms
200
+ - qk_norm: Whether to use qk_norm
201
+ - qk_norm_share_param_across_head: In QK-norm, whether to share the RMSNorm
202
+ scaling parameters across heads. This is just for backward compatibility.
203
+ - use_k_shift: Whether to use data-dependent key shift
204
+ - use_v_shift: Whether to use data-dependent value shift
205
+ - initializer_range: standard deviation for initialization
206
+ - layer_idx: The block index of this layer. Needed for KV-cache
207
+ """
208
+ super().__init__()
209
+
210
+ self.num_heads = num_heads
211
+ if num_kv_heads is None:
212
+ self.num_kv_heads = self.num_heads
213
+ else:
214
+ raise NotImplementedError("GQA has not been tested.")
215
+ self.num_kv_heads = num_kv_heads  # unreachable: the GQA branch raises above
216
+ self.num_kv_groups = num_heads // self.num_kv_heads
217
+ self.hidden_size = hidden_size
218
+ self.head_dim = self.hidden_size // self.num_heads
219
+ self.kv_dim = self.num_kv_heads * self.head_dim
221
+ self.window_size = window_size
222
+ self.max_position_embeddings = max_position_embeddings
223
+ self.layer_idx = layer_idx
224
+
225
+ self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
226
+ if use_k_shift:
227
+ self.k_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
228
+ else:
229
+ self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
230
+
231
+ if use_v_shift:
232
+ self.v_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
233
+ else:
234
+ self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
235
+
236
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
237
+ self.use_k_shift = use_k_shift
238
+ self.use_v_shift = use_v_shift
239
+
240
+
241
+ device = next(self.parameters()).device
242
+ # Forget gate
243
+ assert fgate_type in ["full", "bias_only", "fixed", "none"]
244
+ self.fgate_type = fgate_type
245
+ self.fgate_bias_init = fgate_bias_init
246
+ if fgate_type == "full":
247
+ assert not fgate_bias_init
248
+ self.fgate_proj = nn.Linear(self.hidden_size, self.num_heads, bias=True)
249
+ elif fgate_type == "bias_only":
250
+ self.fgate_bias = nn.Parameter(torch.zeros(size=(self.num_heads,), device=device))
251
+ self.fgate_bias._no_weight_decay = True
252
+ elif fgate_type == "fixed":
253
+ assert fgate_bias_init, "You must set fgate_bias_init = True with fixed fgate"
254
+ fgate_bias = torch.zeros(size=(self.num_heads,), device=device)
255
+ self.register_buffer("fgate_bias", fgate_bias)
256
+ elif fgate_type == "none":
257
+ pass
258
+ else:
259
+ raise ValueError(f"Unknown fgate type {fgate_type}")
260
+
261
+
262
+
263
+ # Forget gate initialization for data-independent and fixed forget gates
264
+ if fgate_bias_init:
265
+ assert decay_time_min is not None and decay_time_max is not None
266
+ assert decay_time_min > 0 and decay_time_max > 0
267
+ with torch.no_grad():
268
+ log_decay_time = torch.linspace(math.log(decay_time_min), math.log(decay_time_max), steps=self.num_heads)
269
+ decay_time = torch.exp(log_decay_time)
270
+ # Such that t = -1 / log(sigmoid(b))
271
+ bias_init = -torch.log(torch.expm1(1 / decay_time))
272
+ self.fgate_bias.copy_(bias_init)
273
+ else:
274
+ assert decay_time_min is None and decay_time_max is None
275
+
276
+ if use_output_gate:
277
+ self.ogate_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
278
+ self.ogate_act = ogate_act
279
+ assert ogate_act in ["silu", "sigmoid"]
280
+ else:
281
+ self.ogate_proj = None
282
+
283
+ if use_output_norm:
284
+ self.output_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size, eps=norm_eps)
285
+ else:
286
+ self.output_norm = None
287
+
288
+
289
+ if use_rope:
290
+ self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
291
+ else:
292
+ self.rotary = None
293
+
294
+
295
+ self.qk_norm = qk_norm
296
+ self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
297
+ if qk_norm:
298
+ if self.qk_norm_share_param_across_head:
299
+ # This is an incorrect implementation kept just for backward compatibility
300
+ self.q_norm = RMSNorm(self.head_dim)
301
+ self.k_norm = RMSNorm(self.head_dim)
302
+ else:
303
+ self.q_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
304
+ self.k_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
305
+
306
+ self.initializer_range = initializer_range
307
+ self.apply(self._initialize_weights)
308
+
309
+ def _initialize_weights(self, module: nn.Module):
310
+ # This will actually be overwritten by outer init.
311
+ if isinstance(module, nn.Linear):
312
+ nn.init.normal_(module.weight, mean=0.0, std=self.initializer_range)
313
+ if module.bias is not None:
314
+ nn.init.zeros_(module.bias)
315
+
316
+ def forward(
317
+ self,
318
+ hidden_states: torch.Tensor,
319
+ attention_mask: Optional[torch.LongTensor] = None,
320
+ past_key_values: Optional[Cache] = None,
321
+ output_attentions: bool = False,
322
+ use_cache: bool = False,
323
+ **kwargs,
324
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
325
+ """
326
+ We assume the attention mask is all ones during decoding; other mask patterns are not supported.
327
+ """
328
+ batch_size, q_len, _ = hidden_states.size()
329
+ if use_cache:
330
+ key_shift_state = past_key_values.key_shift_cache[self.layer_idx]
331
+ value_shift_state = past_key_values.value_shift_cache[self.layer_idx]
332
+ else:
333
+ key_shift_state = value_shift_state = None
334
+
335
+ # Shift states are updated in place
336
+ q = self.q_proj(hidden_states)
337
+ if self.use_k_shift:
338
+ k = self.k_proj(hidden_states, key_shift_state)
339
+ else:
340
+ k = self.k_proj(hidden_states)
341
+ if self.use_v_shift:
342
+ v = self.v_proj(hidden_states, value_shift_state)
343
+ else:
344
+ v = self.v_proj(hidden_states)
345
+
346
+ if self.qk_norm and (not self.qk_norm_share_param_across_head):
347
+ q = self.q_norm(q).to(q.dtype)
348
+ k = self.k_norm(k).to(k.dtype)
349
+
350
+ q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
351
+ k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
352
+ v = rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)
353
+
354
+
355
+ if self.qk_norm and (self.qk_norm_share_param_across_head):
356
+ q = self.q_norm(q).to(q.dtype)
357
+ k = self.k_norm(k).to(k.dtype)
358
+
359
+
360
+ seqlen_offset, max_seqlen = 0, q.shape[1]
361
+ if past_key_values is not None:
362
+ seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
363
+ max_seqlen = q.shape[1] + seqlen_offset
364
+
365
+ if attention_mask is not None:
366
+ # to eliminate the offsets introduced by padding tokens
367
+ seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
368
+ max_seqlen = q.shape[1] + max(seqlen_offset)
369
+
370
+ if self.max_position_embeddings is not None:
371
+ max_seqlen = max(max_seqlen, self.max_position_embeddings)
372
+ if self.rotary is not None:
373
+ q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
374
+
375
+ if self.fgate_type == "full":
376
+ fgate_logit = self.fgate_proj(hidden_states)
377
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
378
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
379
+ elif self.fgate_type == "none":
380
+ log_fgate = torch.zeros((batch_size, self.num_heads, q_len), dtype=torch.float32, device=hidden_states.device)
381
+ else:
382
+ assert self.fgate_type in ["fixed", "bias_only"]
383
+ fgate_logit = torch.broadcast_to(self.fgate_bias, (batch_size, q_len, self.num_heads))
384
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
385
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
386
+
387
+ k = rearrange(k, 'b t h d -> b h t d')
388
+ if past_key_values is not None:
389
+ k, v, log_fgate = past_key_values.update(k, v, log_fgate, self.layer_idx)
390
+ # k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
391
+ q = rearrange(q, 'b t h d -> b h t d')
392
+
393
+ if self.num_kv_groups > 1:
394
+ assert False  # GQA is untested; the k/v expansion below is unreachable
395
+ k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
396
+ v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
397
+
398
+ # Contains at least one padding token in the sequence
399
+ if attention_mask is not None:
400
+ B, _, T = log_fgate.size()
401
+ assert attention_mask.size() == (B, T), ((B, T), attention_mask.size())
402
+ seq_start = T - attention_mask.sum(dim=-1)
403
+ o = forgetting_attention(
404
+ q, k, v,
405
+ log_fgate,
406
+ head_first=True,
407
+ seq_start=seq_start,
408
+ sm_scale=1 / math.sqrt(self.head_dim),
409
+ )
410
+ o = rearrange(o, "b h t d -> b t h d")
411
+ else:
412
+ o = forgetting_attention(
413
+ q, k, v,
414
+ log_fgate,
415
+ head_first=True,
416
+ sm_scale=1 / math.sqrt(self.head_dim),
417
+ )
418
+ o = rearrange(o, "b h t d -> b t h d")
419
+
420
+ o = o.reshape(batch_size, q_len, self.hidden_size)
421
+
422
+ if self.output_norm is not None:
423
+ o = self.output_norm(o)
424
+
425
+ if self.ogate_proj is not None:
426
+ # ogate = self.ogate act(self.ogate_proj(hidden_states))
427
+ # o = o * ogate
428
+ # ogate = act_gate(self.ogate_proj(hidden_states), o)
429
+ ogate_logit = self.ogate_proj(hidden_states)
430
+ dtype = ogate_logit.dtype
431
+ if self.ogate_act == "silu":
432
+ o = swiglu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
433
+ elif self.ogate_act == "sigmoid":
434
+ o = glu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
435
+ else:
436
+ raise ValueError(f"Unknown ogate act {self.ogate_act}")
437
+ else:
438
+ o = self.o_proj(o)
439
+
440
+ if not output_attentions:
441
+ attentions = None
442
+ else:
443
+ SAVE_HEADS = [0, 1, 2, 3]  # hard-coded heads whose scores are saved for inspection
444
+ # (B, H, T, T)
445
+ score = q[:, SAVE_HEADS] @ k[:, SAVE_HEADS].mT
446
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
447
+ decay_bias = (log_lambda[:, SAVE_HEADS, :, None] - log_lambda[:, SAVE_HEADS, None, :]).to(torch.bfloat16)
448
+ # normalized_score = torch.softmax(score, dim=-1)
449
+ attentions = (score, decay_bias)
450
+
451
+ return o, attentions, past_key_values
452
+
453
+ def init_shift_state(self, batch_size: int):
454
+ param = next(self.parameters())
455
+ state = dict()
456
+ try:
457
+ dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled("cuda") else torch.float32
458
+ except TypeError:
459
+ # Support legacy torch version
460
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else torch.float32
461
+ if self.use_k_shift:
462
+ state['key_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
463
+ else:
464
+ state['key_shift'] = None
465
+ if self.use_v_shift:
466
+ state['value_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
467
+ else:
468
+ state['value_shift'] = None
469
+ return state
470
+
471
+
472
+ class ForgettingTransformerMLP(nn.Module):
473
+
474
+ def __init__(
475
+ self,
476
+ hidden_size: int,
477
+ hidden_ratio: Optional[float] = None,
478
+ intermediate_size: Optional[int] = None,
479
+ hidden_act: str = 'swish'
480
+ ) -> ForgettingTransformerMLP:
481
+ super().__init__()
482
+
483
+ self.hidden_size = hidden_size
484
+ # the final number of params is `hidden_ratio * hidden_size^2`
485
+ # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
486
+ if hidden_ratio is None:
487
+ hidden_ratio = 4
488
+ if intermediate_size is None:
489
+ intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
490
+ intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
491
+ self.hidden_ratio = hidden_ratio
492
+ self.intermediate_size = intermediate_size
493
+
494
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
495
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
496
+ self.act_fn = ACT2FN[hidden_act]
497
+ self.hidden_act = hidden_act
498
+ assert hidden_act in ["swish", "sigmoid"]
499
+
500
+ def forward(self, x):
501
+ y = self.gate_proj(x)
502
+ gate, y = y.chunk(2, -1)
503
+ # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
504
+ if self.hidden_act == "swish":
505
+ return swiglu_linear(
506
+ gate, y,
507
+ self.down_proj.weight.to(y.dtype),
508
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
509
+ )
510
+ elif self.hidden_act == "sigmoid":
511
+ return glu_linear(
512
+ gate, y,
513
+ self.down_proj.weight.to(y.dtype),
514
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
515
+ )
516
+ else:
517
+ raise ValueError()
518
+
519
+
520
+ class ForgettingTransformerBlock(nn.Module):
521
+ def __init__(self, config, layer_idx: int):
522
+ super().__init__()
523
+ self.hidden_size = config.hidden_size
524
+
525
+ self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
526
+ self.attn = ForgettingAttentionLayer(
527
+ hidden_size=config.hidden_size,
528
+ num_heads=config.num_heads,
529
+ num_kv_heads=config.num_kv_heads,
530
+ window_size=config.window_size,
531
+ max_position_embeddings=config.max_position_embeddings,
532
+ rope_base=config.rope_base,
533
+ use_rope=config.use_rope,
534
+ use_output_gate=config.use_output_gate,
535
+ ogate_act=config.ogate_act,
536
+ fgate_type=config.fgate_type,
537
+ fgate_bias_init=config.fgate_bias_init,
538
+ decay_time_min=config.decay_time_min,
539
+ decay_time_max=config.decay_time_max,
540
+ use_output_norm = config.use_output_norm,
541
+ norm_eps=config.norm_eps,
542
+ qk_norm=config.qk_norm,
543
+ qk_norm_share_param_across_head=config.qk_norm_share_param_across_head,
544
+ use_k_shift=config.use_k_shift,
545
+ use_v_shift=config.use_v_shift,
546
+ initializer_range=config.initializer_range,
547
+ layer_idx=layer_idx
548
+ )
549
+ self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
550
+ self.mlp = ForgettingTransformerMLP(
551
+ hidden_size=config.hidden_size,
552
+ hidden_ratio=config.hidden_ratio,
553
+ intermediate_size=config.intermediate_size,
554
+ hidden_act=config.hidden_act
555
+ )
556
+
557
+ def forward_attn(
558
+ self,
559
+ hidden_states: torch.Tensor,
560
+ attention_mask: Optional[torch.Tensor] = None,
561
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
562
+ output_attentions: Optional[bool] = False,
563
+ use_cache: Optional[bool] = False,
564
+ **kwargs,
565
+ ):
566
+ # residual handled outside of this
567
+ # residual = hidden_states
568
+ hidden_states = self.attn_norm(hidden_states)
569
+ hidden_states, attentions, past_key_values = self.attn(
570
+ hidden_states=hidden_states,
571
+ attention_mask=attention_mask,
572
+ past_key_values=past_key_values,
573
+ use_cache=use_cache,
574
+ output_attentions=output_attentions
575
+ )
576
+ return hidden_states, attentions, past_key_values
577
+
578
+ def forward_mlp(
579
+ self,
580
+ hidden_states: torch.Tensor,
581
+ residual: torch.Tensor,
582
+ ):
583
+ hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
584
+ hidden_states = self.mlp(hidden_states)
585
+ hidden_states = residual + hidden_states
586
+
587
+ return hidden_states
588
+
589
+ def forward(
590
+ self,
591
+ hidden_states: torch.Tensor,
592
+ attention_mask: Optional[torch.Tensor] = None,
593
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
594
+ output_attentions: Optional[bool] = False,
595
+ use_cache: Optional[bool] = False,
596
+ gradient_checkpointing: bool = False
597
+ # **kwargs,
598
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
599
+
600
+ residual = hidden_states
601
+
602
+
603
+ if gradient_checkpointing:
604
+ forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
605
+ forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
606
+ else:
607
+ forward_attn = self.forward_attn
608
+ forward_mlp = self.forward_mlp
609
+
610
+ hidden_states, attentions, past_key_values = forward_attn(
611
+ hidden_states=hidden_states,
612
+ attention_mask=attention_mask,
613
+ past_key_values=past_key_values,
614
+ use_cache=use_cache,
615
+ output_attentions=output_attentions
616
+ )
617
+
618
+ hidden_states = forward_mlp(
619
+ hidden_states,
620
+ residual,
621
+ )
622
+
623
+ outputs = (hidden_states,)
624
+
625
+ if output_attentions:
626
+ outputs += (attentions,)
627
+
628
+ if use_cache:
629
+ outputs += (past_key_values,)
630
+
631
+ return outputs
632
+
633
+
634
+
635
+ class ForgettingTransformerPreTrainedModel(PreTrainedModel):
636
+
637
+ config_class = ForgettingTransformerConfig
638
+ supports_gradient_checkpointing = True
639
+ _no_split_modules = ['ForgettingTransformerBlock']
640
+
641
+ def __init__(self, config, *inputs, **kwargs):
642
+ # Dynamically patch config_class to support remote-code loading
643
+ if hasattr(config, '__class__'):
644
+ config_module = config.__class__.__module__
645
+ if 'transformers_modules' in config_module or config_module == 'configuration_forgetting_transformer':
646
+ self.__class__.config_class = config.__class__
647
+ super().__init__(config, *inputs, **kwargs)
648
+
649
+ def _init_weights(
650
+ self,
651
+ module: nn.Module,
652
+ ):
653
+ # if isinstance(module, (nn.Linear, nn.Conv1d)):
654
+ if isinstance(module, (nn.Linear)):
655
+ # Slightly different from the TF version which uses truncated_normal for initialization
656
+ # cf https://github.com/pytorch/pytorch/pull/5617
657
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
658
+ if module.bias is not None:
659
+ nn.init.zeros_(module.bias)
660
+ elif isinstance(module, nn.Embedding):
661
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
662
+ if module.padding_idx is not None:
663
+ module.weight.data[module.padding_idx].zero_()
664
+
665
+
666
+ class ForgettingTransformerModel(ForgettingTransformerPreTrainedModel):
667
+
668
+ def __init__(self, config):
669
+ super().__init__(config)
670
+ self.padding_idx = config.pad_token_id
671
+ self.vocab_size = config.vocab_size
672
+
673
+ self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
674
+ self.layers = nn.ModuleList([ForgettingTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
675
+ self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
676
+
677
+ self.gradient_checkpointing = False
678
+
679
+ self.post_init()
680
+
681
+ def get_input_embeddings(self):
682
+ return self.embeddings
683
+
684
+ def set_input_embeddings(self, value):
685
+ self.embeddings = value
686
+
687
+ def forward(
688
+ self,
689
+ input_ids: Optional[torch.LongTensor] = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
692
+ inputs_embeds: Optional[torch.FloatTensor] = None,
693
+ use_cache: Optional[bool] = None,
694
+ output_attentions: Optional[bool] = None,
695
+ output_hidden_states: Optional[bool] = None,
696
+ return_dict: Optional[bool] = None
697
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
698
+ # if output_attentions:
699
+ # warnings.warn(
700
+ # "`ForgettingTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
701
+ # )
702
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
703
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
704
+ use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
705
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
706
+
707
+ # retrieve input_ids and inputs_embeds
708
+ if input_ids is not None and inputs_embeds is not None:
709
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
710
+ elif input_ids is None and inputs_embeds is None:
711
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
712
+
713
+ if use_cache:
714
+ # use_legacy_cache = not isinstance(past_key_values, Cache)
715
+ # if use_legacy_cache:
716
+ # past_key_values = FgateDynamicCache.from_legacy_cache(past_key_values)
717
+ if past_key_values is None:
718
+ past_key_values = FgateDynamicCache()
719
+ for layer_idx, layer in enumerate(self.layers):
720
+ shift_state = layer.attn.init_shift_state(
721
+ batch_size=input_ids.size(0) if input_ids is not None else inputs_embeds.size(0),
722
+ )
723
+ past_key_values.update_shift_cache(
724
+ key_shift_state=shift_state["key_shift"],
725
+ value_shift_state=shift_state["value_shift"],
726
+ layer_idx=layer_idx
727
+ )
728
+ else:
729
+ assert isinstance(past_key_values, FgateDynamicCache)
730
+
731
+ if inputs_embeds is None:
732
+ inputs_embeds = self.embeddings(input_ids)
733
+
734
+ # embed positions
735
+ hidden_states = inputs_embeds
736
+
737
+ if self.gradient_checkpointing and self.training:
738
+ if use_cache:
739
+ logger.warning_once(
740
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
741
+ )
742
+ use_cache = False
743
+
744
+ all_hidden_states = () if output_hidden_states else None
745
+ all_attns = {} if output_attentions else None
746
+ next_decoder_cache = None
747
+
748
+ for layer_id, layer in enumerate(self.layers):
749
+ if output_hidden_states:
750
+ all_hidden_states += (hidden_states,)
751
+
752
+ layer_outputs = layer(
753
+ hidden_states,
754
+ attention_mask=attention_mask,
755
+ past_key_values=past_key_values,
756
+ output_attentions=output_attentions,
757
+ use_cache=use_cache,
758
+ gradient_checkpointing=self.gradient_checkpointing and self.training
759
+ )
760
+
761
+ hidden_states = layer_outputs[0]
762
+
763
+ if use_cache:
764
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
765
+
766
+ if output_attentions:
767
+ OUTPUT_ATTN_LAYERS = [0, 7, 15, 23]  # hard-coded layers whose attention tensors are recorded
768
+ if layer_id in OUTPUT_ATTN_LAYERS:
769
+ # all_attns += (layer_outputs[1],)
770
+ all_attns[layer_id] = layer_outputs[1]
771
+
772
+ hidden_states = self.norm(hidden_states)
773
+
774
+ # add hidden states from the last decoder layer
775
+ if output_hidden_states:
776
+ all_hidden_states += (hidden_states,)
777
+
778
+ next_cache = None
779
+ if use_cache:
780
+ # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
781
+ next_cache = next_decoder_cache
782
+ if not return_dict:
783
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
784
+
785
+ return BaseModelOutputWithPast(
786
+ last_hidden_state=hidden_states,
787
+ past_key_values=next_cache,
788
+ hidden_states=all_hidden_states,
789
+ attentions=all_attns
790
+ )
791
+
792
+
793
+ class ForgettingTransformerForCausalLM(ForgettingTransformerPreTrainedModel):
794
+ _tied_weights_keys = ["lm_head.weight"]
795
+
796
+ def __init__(self, config):
797
+ super().__init__(config)
798
+ self.model = ForgettingTransformerModel(config)
799
+ self.vocab_size = config.vocab_size
800
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
801
+
802
+ # Initialize weights and apply final processing
803
+ self.post_init()
804
+
805
+ def get_input_embeddings(self):
806
+ return self.model.embeddings
807
+
808
+ def set_input_embeddings(self, value):
809
+ self.model.embeddings = value
810
+
811
+ def get_output_embeddings(self):
812
+ return self.lm_head
813
+
814
+ def set_output_embeddings(self, new_embeddings):
815
+ self.lm_head = new_embeddings
816
+
817
+ def set_decoder(self, decoder):
818
+ self.model = decoder
819
+
820
+ def get_decoder(self):
821
+ return self.model
822
+
823
+ def prepare_inputs_for_generation(
824
+ self,
825
+ input_ids: torch.LongTensor = None,
826
+ past_key_values: Optional[torch.Tensor] = None,
827
+ attention_mask: Optional[torch.Tensor] = None,
828
+ inputs_embeds: Optional[torch.Tensor] = None,
829
+ **kwargs
830
+ ):
831
+ # only use the last token of `input_ids` if `past_key_values` is passed along.
832
+ if past_key_values is not None:
833
+ input_ids = input_ids[:, -1:]
834
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
835
+ if inputs_embeds is not None and past_key_values is None:
836
+ model_inputs = {'inputs_embeds': inputs_embeds}
837
+ else:
838
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
839
+ # recompiles graphs as the stride of the inputs is a guard.
840
+ # Ref: https://github.com/huggingface/transformers/pull/29114
841
+ # TODO: use `next_tokens` directly instead.
842
+ model_inputs = {'input_ids': input_ids.contiguous()}
843
+
844
+ model_inputs.update({
845
+ 'past_key_values': past_key_values,
846
+ 'use_cache': kwargs.get('use_cache'),
847
+ 'attention_mask': attention_mask,
848
+ })
849
+ return model_inputs
850
+
851
+ def forward(
852
+ self,
853
+ input_ids: torch.LongTensor = None,
854
+ attention_mask: Optional[torch.Tensor] = None,
855
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
856
+ inputs_embeds: Optional[torch.FloatTensor] = None,
857
+ labels: Optional[torch.LongTensor] = None,
858
+ use_cache: Optional[bool] = None,
859
+ output_attentions: Optional[bool] = None,
860
+ output_hidden_states: Optional[bool] = None,
861
+ return_dict: Optional[bool] = None,
862
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
863
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
864
+ output_hidden_states = (
865
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
866
+ )
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+
869
+ outputs = self.model(
870
+ input_ids=input_ids,
871
+ attention_mask=attention_mask,
872
+ past_key_values=past_key_values,
873
+ inputs_embeds=inputs_embeds,
874
+ use_cache=use_cache,
875
+ output_attentions=output_attentions,
876
+ output_hidden_states=output_hidden_states,
877
+ return_dict=return_dict
878
+ )
879
+
880
+ hidden_states = outputs[0]
881
+
882
+ loss = None
883
+ if labels is not None:
884
+ if self.config.fuse_cross_entropy:
885
+ loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
886
+ else:
887
+ loss_fct = nn.CrossEntropyLoss(reduction='none')
888
+ logits = self.lm_head(hidden_states)
889
+ # Enable model parallelism
890
+ labels = labels.to(logits.device)
891
+ # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
892
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
893
+ loss = loss.view(*labels.size())
894
+ del logits
895
+ logits = None
896
+ else:
897
+ logits = self.lm_head(hidden_states)
898
+
899
+ if not return_dict:
900
+ raise NotImplementedError
901
+ output = (logits,) + outputs[1:]
902
+ return (loss,) + output if loss is not None else output
903
+
904
+ return CausalLMOutputWithPast(
905
+ loss=loss,
906
+ logits=logits,
907
+ past_key_values=outputs.past_key_values,
908
+ hidden_states=outputs.hidden_states,
909
+ attentions=outputs.attentions,
910
+ )
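A minimal usage sketch for the model classes above, assuming the repository also ships a config.json whose auto_map points at these remote-code files; the repo id below is a placeholder:

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    # trust_remote_code=True lets transformers load modeling_forgetting_transformer.py from the repo
    tokenizer = AutoTokenizer.from_pretrained("namespace/forgetting-transformer", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        "namespace/forgetting-transformer",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    inputs = tokenizer("The forgetting transformer", return_tensors="pt").to("cuda")
    # use_cache=True routes decoding through FgateDynamicCache via prepare_inputs_for_generation
    output_ids = model.generate(**inputs, max_new_tokens=32, use_cache=True)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))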
ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the innermost dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked values to affect the result. If we
65
+ # don't do this, they still could, by affecting the numerical precision of the
66
+ # cumsum.
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "grouped query attention has not been tested. Remove this assert if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of k & v.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
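+ # Since log_lambda = cumsum(log_fgate), the chain rule gives
+ # d log_fgate[t] = sum_{s >= t} d log_lambda[s]; this suffix sum is formed
+ # from the prefix sum as dlog_lambda + (total - prefix).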
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seq_len_* axes of all FloatTensor
260
+ inputs and outputs are ordered as (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads).
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
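A minimal call sketch for forgetting_attention as documented above; shapes and dtypes follow the docstring, and the tensors are random placeholders:

    import torch
    import torch.nn.functional as F

    B, T, H, D = 2, 128, 4, 64
    q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    # log of the forget gates, one value per (token, head), kept in float32
    log_fgate = F.logsigmoid(torch.randn(B, T, H, device="cuda", dtype=torch.float32))
    out = forgetting_attention(q, k, v, log_fgate)  # (B, T, H, D)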
279
+
280
+
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ # Dot-I trick: placing q in registers saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in m-dimension that this block may access is in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # According to the rule of causal masking, then max index in n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of the index in the n-dimension should never exceed the sequence length of k/v (`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # In this case, there would be an illegal memory access when loading k & v tiles
400
+ # if mask_n is not applied for loading (which is the case only when `DIVISIBLE_N` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
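+ # decay_bias[i, j] = log_lambda[i] - log_lambda[j], i.e. the summed log forget
+ # gates over positions (j, i]; it is multiplied by log2(e) because the kernel
+ # accumulates with exp2 rather than exp.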
442
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sumexpn before applying dropout
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute (Out * Dout).sum() for vector interpretation
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_N, BLOCK_DMODEL)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_M, BLOCK_N)
727
+
728
+ # load q, log_lambda_out and do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_M, BLOCK_N)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_M, BLOCK_N)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N, BLOCK_DMODEL,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N, BLOCK_DMODEL,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v and log_lambda_in on chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds so that invalid positions contribute exactly zero
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
992
+
993
+
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py ADDED
@@ -0,0 +1,72 @@
1
+ """
2
+ Forgetting Attention - standard softmax version
3
+ Add this function at the end of forgetting_attention.py
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from einops import rearrange
10
+ from typing import Optional
11
+
12
+
13
+ def forgetting_attention_std(
14
+ q: torch.Tensor,
15
+ k: torch.Tensor,
16
+ v: torch.Tensor,
17
+ log_fgate: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ ) -> torch.Tensor:
23
+ """Standard-softmax (reference) version of Forgetting Attention"""
24
+
25
+ if not head_first:
26
+ q = rearrange(q, "b t h d -> b h t d")
27
+ k = rearrange(k, "b t h d -> b h t d")
28
+ v = rearrange(v, "b t h d -> b h t d")
29
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
30
+
31
+ B, H, T_q, D = q.shape
32
+ T_k = k.shape[2]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # compute QK scores
38
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
39
+
40
+ # handle seq_start
41
+ log_fgate_masked = log_fgate.float()
42
+ if seq_start is not None:
43
+ log_fgate_masked = log_fgate_masked.clone()
44
+ mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]
45
+ log_fgate_masked[mask_idx] = 0.0
46
+
47
+ # compute the cumulative decay
48
+ log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
49
+ decay_bias = log_lambda[:, :, :T_q, None] - log_lambda[:, :, None, :]
50
+ scores = scores + decay_bias
51
+
52
+ # Causal mask
53
+ P_SEQ = T_k - T_q
54
+ causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
55
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
56
+
57
+ # seq_start mask
58
+ if seq_start is not None:
59
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
60
+ scores = scores.masked_fill(seq_mask, float('-inf'))
61
+
62
+ # Softmax
63
+ attn = F.softmax(scores, dim=-1)
64
+ attn = torch.nan_to_num(attn, 0.0)
65
+
66
+ # compute the output
67
+ out = torch.matmul(attn.to(v.dtype), v)
68
+
69
+ if not head_first:
70
+ out = rearrange(out, "b h t d -> b t h d")
71
+
72
+ return out
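A sanity-check sketch (not part of the file above) comparing this pure-PyTorch reference against the Triton kernel, assuming ops/forgetting_attention.py is importable as shown; tolerances are illustrative:

    import torch
    import torch.nn.functional as F
    from ops.forgetting_attention import forgetting_attention

    B, T, H, D = 2, 256, 4, 64
    q, k, v = (torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16) for _ in range(3))
    log_fgate = F.logsigmoid(torch.randn(B, T, H, device="cuda", dtype=torch.float32))

    ref = forgetting_attention_std(q, k, v, log_fgate)
    tri = forgetting_attention(q, k, v, log_fgate)
    assert torch.allclose(ref.to(tri.dtype), tri, atol=1e-2, rtol=0)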
ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py ADDED
@@ -0,0 +1,179 @@
1
+ """
2
+ Geometric Attention - standard softmax version
3
+ Based on the paper "The Neural Data Router" (Csordás et al., 2022)
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def geometric_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ normalize: bool = True,
23
+ ) -> torch.Tensor:
24
+ """
25
+ Standard-softmax version of Geometric Attention
26
+
27
+ Args:
28
+ q: Query tensor [B, T, H, D] or [B, H, T, D] if head_first
29
+ k: Key tensor [B, T, H, D] or [B, H, T, D] if head_first
30
+ v: Value tensor [B, T, H, D] or [B, H, T, D] if head_first
31
+ head_first: whether the head dimension precedes the sequence dimension
32
+ seq_start: sequence start positions [B]
33
+ sm_scale: scaling factor, defaults to 1/sqrt(D)
34
+ normalize: whether to normalize the attention weights
35
+
36
+ Returns:
37
+ output: [B, T, H, D] or [B, H, T, D] if head_first
38
+ """
39
+
40
+ # Rearrange to head_first format
41
+ if not head_first:
42
+ q = rearrange(q, "b t h d -> b h t d")
43
+ k = rearrange(k, "b t h d -> b h t d")
44
+ v = rearrange(v, "b t h d -> b h t d")
45
+
46
+ B, H, T_q, D = q.shape
47
+ T_k = k.shape[2]
48
+
49
+ if sm_scale is None:
50
+ sm_scale = 1.0 / math.sqrt(D)
51
+
52
+ # Step 1: Compute content-based logits
53
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
54
+ # logits: [B, H, T_q, T_k]
55
+
56
+ # Step 2: Mask the diagonal (positions may not attend to themselves)
57
+ if T_q == T_k:
58
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
59
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
60
+
61
+ # Step 3: Apply the seq_start mask
62
+ if seq_start is not None:
63
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
64
+ logits = logits.masked_fill(seq_mask, float('-inf'))
65
+
66
+ # Step 4: Causal mask (if needed)
68
+ # Note: the geometric attention paper does not use a causal mask; uncomment the lines below if your task needs one
68
+ # P_SEQ = T_k - T_q
69
+ # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
70
+ # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
71
+
72
+ # Step 5: Geometric weighting (the core algorithm)
73
+ attn_weights = geometric_weighting(logits, normalize=normalize)
74
+
75
+ # Step 6: Apply attention to the values
76
+ out = torch.matmul(attn_weights.to(v.dtype), v)
77
+
78
+ if not head_first:
79
+ out = rearrange(out, "b h t d -> b t h d")
80
+
81
+ return out
82
+
83
+
84
+ def geometric_weighting(
85
+ logits: torch.Tensor,
86
+ normalize: bool = True,
87
+ ) -> torch.Tensor:
88
+ """
89
+ Compute geometric attention weights
90
+
91
+ Implements Equation 7 from the paper:
92
+ A[i,j] = P[i,j] * ∏(1 - P[i,k]) for k closer to i than j
93
+
94
+ Args:
95
+ logits: [B, H, T_q, T_k] attention logits
96
+ normalize: whether to normalize
97
+
98
+ Returns:
99
+ weights: [B, H, T_q, T_k] attention weights
100
+ """
101
+ B, H, T_q, T_k = logits.shape
102
+
103
+ # Step 1: Sigmoid to get matching probabilities
104
+ P = torch.sigmoid(logits) # [B, H, T_q, T_k]
105
+
106
+ # Step 2: Work in log-space (numerically stable)
107
+ log_P = torch.log(P + 1e-10)
108
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
109
+
110
+ # Step 3: Simplified version - implement the geometric weighting via cumsum
112
+ # This is an efficient approximation that avoids explicit loops
112
+
113
+ # For each position i, accumulate log(1 - P) over all positions to its left
114
+ log_decay_left = log_one_minus_P.cumsum(dim=-1)
115
+
116
+ # Compute the weights (simplified)
117
+ # The full version would pick the accumulation range dynamically by distance; this uses an efficient approximation
118
+ weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
119
+
120
+ # Special-case the first position (it has no elements to its left)
121
+ # Avoid in-place operations
122
+ weights_first = P[:, :, :, :1] # take the first column
123
+ weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
124
+
125
+ # Step 4: Normalization (optional)
126
+ if normalize:
127
+ weights = F.normalize(weights, p=1, dim=-1)
128
+
129
+ # Handle NaNs (when every position in a row is -inf)
130
+ weights = torch.nan_to_num(weights, 0.0)
131
+
132
+ return weights
133
+
134
+
135
+ def geometric_weighting_full(
136
+ logits: torch.Tensor,
137
+ normalize: bool = True,
138
+ ) -> torch.Tensor:
139
+ """
140
+ Full geometric weighting (slower but more accurate)
141
+
142
+ Use only when maximum accuracy is required; the simplified version above is recommended for training
143
+ """
144
+ B, H, T_q, T_k = logits.shape
145
+ device = logits.device
146
+
147
+ P = torch.sigmoid(logits)
148
+ log_P = torch.log(P + 1e-10)
149
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
150
+
151
+ # Initialize the weights
152
+ weights = torch.zeros_like(P)
153
+
154
+ # Compute the geometric weight for each (i, j)
155
+ for i in range(T_q):
156
+ for j in range(T_k):
157
+ # Find all positions k that are closer to i than j is
158
+ if i < j:
159
+ # Looking right: closer positions are [i+1, ..., j-1]
160
+ closer_positions = range(i + 1, j)
161
+ elif i > j:
162
+ # Looking left: closer positions are [j+1, ..., i-1]
163
+ closer_positions = range(j + 1, i)
164
+ else:
165
+ # i == j (the diagonal) has already been masked out above
166
+ continue
167
+
168
+ # Compute ∏(1 - P[i,k]) in log-space
169
+ log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
170
+
171
+ # weights[i,j] = P[i,j] * ∏(1 - P[i,k])
172
+ weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
173
+
174
+ if normalize:
175
+ weights = F.normalize(weights, p=1, dim=-1)
176
+
177
+ weights = torch.nan_to_num(weights, 0.0)
178
+
179
+ return weights
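A quick sanity-check sketch for the two weighting routines above (sizes are illustrative); since the cumsum-based path is an approximation, some deviation from the full double-loop version is expected:

import torch

logits = torch.randn(1, 2, 8, 8)
w_fast = geometric_weighting(logits)        # efficient cumsum approximation
w_full = geometric_weighting_full(logits)   # exact but slow double loop
print((w_fast - w_full).abs().max())        # expected to be nonzero

q, k, v = (torch.randn(1, 8, 2, 32) for _ in range(3))
out = geometric_attention_std(q, k, v)      # [1, 8, 2, 32]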
ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py ADDED
@@ -0,0 +1,88 @@
1
+ """
2
+ Sliding Window / Hard Attention
3
+ Based on "Context Limitations Make Neural Language Models More Human-Like"
4
+ (Kuribayashi et al., 2022)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def sliding_window_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ window_size: int = 2, # default 2-gram (sees the 1 preceding token)
23
+ ) -> torch.Tensor:
24
+ """
25
+ Sliding Window Attention
26
+
27
+ Hard cutoff: each query can only attend to the most recent window_size tokens
28
+ """
29
+
30
+ if not head_first:
31
+ q = rearrange(q, "b t h d -> b h t d")
32
+ k = rearrange(k, "b t h d -> b h t d")
33
+ v = rearrange(v, "b t h d -> b h t d")
34
+
35
+ B, H, T_q, D = q.shape
36
+ T_k = k.shape[2]
37
+
38
+ if sm_scale is None:
39
+ sm_scale = 1.0 / math.sqrt(D)
40
+
41
+ # Compute logits
42
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
43
+
44
+ # Create sliding window mask
45
+ mask = create_sliding_window_mask(T_q, T_k, window_size, device=q.device)
46
+ logits = logits.masked_fill(~mask, float('-inf'))
47
+
48
+ # Seq start mask
49
+ if seq_start is not None:
50
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
51
+ logits = logits.masked_fill(seq_mask, float('-inf'))
52
+
53
+ # Standard softmax
54
+ weights = F.softmax(logits, dim=-1)
55
+
56
+ # Apply to values
57
+ out = torch.matmul(weights, v)
58
+
59
+ if not head_first:
60
+ out = rearrange(out, "b h t d -> b t h d")
61
+
62
+ return out
63
+
64
+
65
+ def create_sliding_window_mask(
66
+ T_q: int,
67
+ T_k: int,
68
+ window_size: int,
69
+ device: torch.device
70
+ ) -> torch.Tensor:
71
+ """
72
+ Create a sliding window mask
73
+
74
+ window_size=1: attend only to the current token
75
+ window_size=2: attend to the current token and the 1 preceding token (2-gram)
76
+ """
77
+ # Base causal mask
78
+ mask = torch.tril(torch.ones(T_q, T_k, dtype=torch.bool, device=device))
79
+
80
+ # Apply the window restriction
81
+ if window_size > 0 and window_size < T_k:
82
+ for i in range(T_q):
83
+ # Keep only the range [i-window_size+1, i]
84
+ start = max(0, i - window_size + 1)
85
+ if start > 0:
86
+ mask[i, :start] = False
87
+
88
+ return mask[None, None, :, :] # [1, 1, T_q, T_k]
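A tiny worked example of the mask helper above: with T_q = T_k = 4 and window_size = 2, each query position may attend to itself and the single preceding token.

import torch

mask = create_sliding_window_mask(T_q=4, T_k=4, window_size=2, device=torch.device("cpu"))
print(mask[0, 0].int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [0, 1, 1, 0],
#         [0, 0, 1, 1]])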
ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py ADDED
@@ -0,0 +1,117 @@
1
+ """
2
+ Stick-breaking Attention - ICLR 2025
3
+ Based on the paper "Scaling Stick-Breaking Attention" (Tan et al., 2025)
4
+ Simplified PyTorch implementation (no Triton)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from einops import rearrange
12
+ from typing import Optional
13
+
14
+
15
+ def stickbreaking_attention_std(
16
+ q: torch.Tensor,
17
+ k: torch.Tensor,
18
+ v: torch.Tensor,
19
+ *,
20
+ head_first: bool = False,
21
+ seq_start: Optional[torch.Tensor] = None,
22
+ sm_scale: Optional[float] = None,
23
+ normalize: bool = True,
24
+ attend_current: bool = False,
25
+ ) -> torch.Tensor:
26
+ """
27
+ Stick-breaking attention
28
+
29
+ Based on ICLR 2025 paper, simplified PyTorch implementation
30
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} softplus(z_{k,j}))
31
+
32
+ Args:
33
+ q: query [B, T, H, D] or [B, H, T, D] if head_first
34
+ k: key [B, T, H, D] or [B, H, T, D] if head_first
35
+ v: value [B, T, H, D] or [B, H, T, D] if head_first
36
+ attend_current: whether to attend to current position
37
+ normalize: whether to normalize attention weights
38
+ """
39
+
40
+ if not head_first:
41
+ q = rearrange(q, "b t h d -> b h t d")
42
+ k = rearrange(k, "b t h d -> b h t d")
43
+ v = rearrange(v, "b t h d -> b h t d")
44
+
45
+ B, H, T_q, D = q.shape
46
+ T_k = k.shape[2]
47
+
48
+ if sm_scale is None:
49
+ sm_scale = 1.0 / math.sqrt(D)
50
+
51
+ # Compute logits: QK^T / sqrt(d)
52
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
53
+ # [B, H, T_q, T_k]
54
+
55
+ # Causal mask (optional: mask diagonal if not attend_current)
56
+ if T_q == T_k and not attend_current:
57
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
58
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
59
+
60
+ # Seq start mask
61
+ if seq_start is not None:
62
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
63
+ logits = logits.masked_fill(seq_mask, float('-inf'))
64
+
65
+ # Stick-breaking weighting
66
+ attn_weights = stickbreaking_weighting(logits, normalize=normalize)
67
+
68
+ # Apply attention to values
69
+ out = torch.matmul(attn_weights.to(v.dtype), v)
70
+
71
+ if not head_first:
72
+ out = rearrange(out, "b h t d -> b t h d")
73
+
74
+ return out
75
+
76
+
77
+ def stickbreaking_weighting(
78
+ logits: torch.Tensor,
79
+ normalize: bool = True,
80
+ ) -> torch.Tensor:
81
+ """
82
+ Compute stick-breaking attention weights
83
+
84
+ From paper Equation 4:
85
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} log(1 + exp(z_{k,j})))
86
+
87
+ Where log(1 + exp(x)) is softplus(x)
88
+ """
89
+ B, H, T_q, T_k = logits.shape
90
+ device = logits.device
91
+
92
+ # Softplus: log(1 + exp(x))
93
+ # Numerically stable version from paper (Equation 5)
94
+ def softplus_stable(x):
95
+ # softplus(x) = log(1 + exp(x))
96
+ # When x > 15, exp(x) is huge, just return x
97
+ return torch.where(
98
+ x > 15.0,
99
+ x,
100
+ torch.log1p(torch.exp(torch.clamp(x, max=15.0)))
101
+ )
102
+
103
+ # Compute softplus for all logits
104
+ logits_sp = softplus_stable(logits) # [B, H, T_q, T_k]
105
+
106
+ # For each query position, compute cumulative sum
107
+ # We need to accumulate from left to right (position i to j-1)
108
+ log_weights = torch.zeros_like(logits)
109
+
110
+ for i in range(T_q):
111
+ # For query i, we compute attention to all keys j
112
+ z_i = logits[:, :, i, :] # [B, H, T_k]
113
+ z_sp_i = logits_sp[:, :, i, :] # [B, H, T_k]
114
+
115
+ # Cumulative sum of softplus
116
+ # csum[j] = ∑_{k=0}^{j} softplus(z_{i,k})
117
+ csum = z_sp_i.cumsum(dim=-1)
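The checkpoint above ends inside the per-query loop, so the weighting routine is incomplete as stored. For reference only, here is a naive O(T^2) sketch of the stick-breaking weights implied by the quoted equation; this is our reading of the formula, not the file's missing code, and stickbreaking_weights_naive is a hypothetical helper name:

import torch
import torch.nn.functional as F

def stickbreaking_weights_naive(logits: torch.Tensor, attend_current: bool = False) -> torch.Tensor:
    # logits: [B, H, T, T]; query i attends to keys j <= i (or j < i), and keys
    # closer to the query break the stick before more distant ones.
    B, H, T, _ = logits.shape
    log_beta = F.logsigmoid(logits)    # log sigmoid(z) = z - softplus(z)
    log_rest = F.logsigmoid(-logits)   # log(1 - sigmoid(z)) = -softplus(z)
    weights = torch.zeros_like(logits)
    for i in range(T):
        last = i if attend_current else i - 1   # most recent attendable key
        for j in range(last + 1):
            # keys strictly closer to the query than j consume their share of the stick first
            between = log_rest[:, :, i, j + 1:last + 1].sum(dim=-1)
            weights[:, :, i, j] = torch.exp(log_beta[:, :, i, j] + between)
    return weights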
ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py ADDED
@@ -0,0 +1,171 @@
1
+ """
2
+ Standard softmax attention for the vanilla Transformer
3
+ A drop-in replacement for the flash_attn implementation
4
+ """
5
+ import math
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from einops import rearrange
9
+ from typing import Optional, Tuple
10
+
11
+ def vanilla_attention_std(
12
+ q: torch.Tensor,
13
+ k: torch.Tensor,
14
+ v: torch.Tensor,
15
+ causal: bool = True,
16
+ window_size: Optional[Tuple[int, int]] = None,
17
+ sm_scale: Optional[float] = None,
18
+ ) -> torch.Tensor:
19
+ """
20
+ Standard softmax attention, compatible with the flash_attn_func input format
21
+
22
+ Args:
23
+ q, k, v: tensors in [batch, seq_len, num_heads, head_dim] layout
24
+ causal: whether to apply a causal mask
25
+ window_size: sliding window size (left, right); (-1, -1) means unrestricted
26
+ sm_scale: softmax scaling factor
27
+
28
+ Returns:
29
+ output: [batch, seq_len, num_heads, head_dim]
30
+ """
31
+ B, T_q, H, D = q.shape
32
+ T_k = k.shape[1]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # Convert to [B, H, T, D] for the computation
38
+ q = rearrange(q, 'b t h d -> b h t d')
39
+ k = rearrange(k, 'b t h d -> b h t d')
40
+ v = rearrange(v, 'b t h d -> b h t d')
41
+
42
+ # Compute attention scores
43
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
44
+
45
+ # Causal mask
46
+ if causal:
47
+ P_SEQ = T_k - T_q # handle the KV-cache case
48
+ causal_mask = torch.triu(
49
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
50
+ diagonal=P_SEQ + 1
51
+ )
52
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
53
+
54
+ # Window mask (sliding window attention)
55
+ if window_size is not None and window_size != (-1, -1):
56
+ left_window, right_window = window_size
57
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
58
+ for i in range(T_q):
59
+ # Compute the valid window range for each query position
60
+ start = max(0, i - left_window)
61
+ end = min(T_k, i + right_window + 1)
62
+ window_mask[i, start:end] = False
63
+ scores = scores.masked_fill(window_mask[None, None, :, :], float('-inf'))
64
+
65
+ # Softmax
66
+ attn_weights = F.softmax(scores, dim=-1)
67
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
68
+
69
+ # Apply attention to values
70
+ output = torch.matmul(attn_weights.to(v.dtype), v)
71
+
72
+ # Convert back to [B, T, H, D]
73
+ output = rearrange(output, 'b h t d -> b t h d')
74
+
75
+ return output
76
+
77
+
78
+ def vanilla_attention_varlen_std(
79
+ q: torch.Tensor,
80
+ k: torch.Tensor,
81
+ v: torch.Tensor,
82
+ cu_seqlens_q: torch.Tensor,
83
+ cu_seqlens_k: torch.Tensor,
84
+ max_seqlen_q: int,
85
+ max_seqlen_k: int,
86
+ causal: bool = True,
87
+ window_size: Optional[Tuple[int, int]] = None,
88
+ sm_scale: Optional[float] = None,
89
+ ) -> torch.Tensor:
90
+ """
91
+ Standard softmax attention for variable-length sequences, compatible with flash_attn_varlen_func
92
+
93
+ Args:
94
+ q: [total_q_tokens, num_heads, head_dim]
95
+ k: [total_k_tokens, num_kv_heads, head_dim]
96
+ v: [total_k_tokens, num_kv_heads, head_dim]
97
+ cu_seqlens_q: cumulative sequence lengths [batch_size + 1]
98
+ cu_seqlens_k: cumulative sequence lengths [batch_size + 1]
99
+ max_seqlen_q: maximum query sequence length
100
+ max_seqlen_k: maximum key/value sequence length
101
+
102
+ Returns:
103
+ output: [total_q_tokens, num_heads, head_dim]
104
+ """
105
+ batch_size = cu_seqlens_q.shape[0] - 1
106
+ H = q.shape[1]
107
+ D = q.shape[2]
108
+
109
+ if sm_scale is None:
110
+ sm_scale = 1.0 / math.sqrt(D)
111
+
112
+ outputs = []
113
+
114
+ # Process one batch element at a time
115
+ for b in range(batch_size):
116
+ q_start, q_end = cu_seqlens_q[b].item(), cu_seqlens_q[b+1].item()
117
+ k_start, k_end = cu_seqlens_k[b].item(), cu_seqlens_k[b+1].item()
118
+
119
+ if q_start == q_end: # empty sequence
120
+ continue
121
+
122
+ # Slice out q, k, v for the current batch element
123
+ q_b = q[q_start:q_end] # [T_q, H, D]
124
+ k_b = k[k_start:k_end] # [T_k, H, D]
125
+ v_b = v[k_start:k_end] # [T_k, H, D]
126
+
127
+ T_q = q_b.shape[0]
128
+ T_k = k_b.shape[0]
129
+
130
+ # Convert to [H, T, D]
131
+ q_b = rearrange(q_b, 't h d -> h t d')
132
+ k_b = rearrange(k_b, 't h d -> h t d')
133
+ v_b = rearrange(v_b, 't h d -> h t d')
134
+
135
+ # Compute attention scores
136
+ scores = torch.matmul(q_b.float(), k_b.float().transpose(-2, -1)) * sm_scale
137
+
138
+ # Causal mask
139
+ if causal:
140
+ P_SEQ = T_k - T_q
141
+ causal_mask = torch.triu(
142
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
143
+ diagonal=P_SEQ + 1
144
+ )
145
+ scores = scores.masked_fill(causal_mask[None, :, :], float('-inf'))
146
+
147
+ # Window mask
148
+ if window_size is not None and window_size != (-1, -1):
149
+ left_window, right_window = window_size
150
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
151
+ for i in range(T_q):
152
+ start = max(0, i - left_window)
153
+ end = min(T_k, i + right_window + 1)
154
+ window_mask[i, start:end] = False
155
+ scores = scores.masked_fill(window_mask[None, :, :], float('-inf'))
156
+
157
+ # Softmax
158
+ attn_weights = F.softmax(scores, dim=-1)
159
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
160
+
161
+ # Apply attention
162
+ output_b = torch.matmul(attn_weights.to(v_b.dtype), v_b)
163
+
164
+ # Convert back to [T, H, D]
165
+ output_b = rearrange(output_b, 'h t d -> t h d')
166
+ outputs.append(output_b)
167
+
168
+ # Concatenate the outputs of all batch elements
169
+ output = torch.cat(outputs, dim=0)
170
+
171
+ return output
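A usage sketch for the variable-length wrapper above, assuming two packed sequences of lengths 3 and 5 (values are illustrative):

import torch

H, D = 4, 64
lengths = [3, 5]
total = sum(lengths)
q = torch.randn(total, H, D)
k = torch.randn(total, H, D)
v = torch.randn(total, H, D)
cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32)  # cumulative lengths, [batch_size + 1]

out = vanilla_attention_varlen_std(
    q, k, v,
    cu_seqlens_q=cu_seqlens, cu_seqlens_k=cu_seqlens,
    max_seqlen_q=max(lengths), max_seqlen_k=max(lengths),
    causal=True,
)
assert out.shape == (total, H, D)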
ops/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+
2
+ # Framework mock for ndr compatibility
3
+ from . import framework_mock
ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes). View file
 
ops/__pycache__/direction_sensitive_geometric.cpython-310.pyc ADDED
Binary file (5.28 kB). View file
 
ops/__pycache__/forgetting_attention.cpython-310.pyc ADDED
Binary file (25.1 kB). View file
 
ops/__pycache__/forgetting_attention_std.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
ops/__pycache__/framework_mock.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
ops/__pycache__/geometric_attention_final.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
ops/__pycache__/geometric_attention_std.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
ops/__pycache__/layer_with_visualization.cpython-310.pyc ADDED
Binary file (2.17 kB). View file
 
ops/__pycache__/multi_head_attention.cpython-310.pyc ADDED
Binary file (6.92 kB). View file
 
ops/__pycache__/multi_head_relative_pos_attention.cpython-310.pyc ADDED
Binary file (8.08 kB). View file
 
ops/__pycache__/sliding_window_attention_std.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
ops/__pycache__/vanilla_attention_std.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
ops/direction_sensitive_geometric.py ADDED
@@ -0,0 +1,115 @@
1
+ import torch
2
+ from forgetting_transformer.ops.multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
3
+ from typing import Optional
4
+ from forgetting_transformer.ops.geometric_attention import geometric_attention_activation
5
+ import math
6
+ from forgetting_transformer.ops.multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
7
+
8
+
9
+ class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
10
+ def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
11
+ global_content_bias: bool = True, input_size: Optional[int] = None,
12
+ output_size: Optional[int] = None, normalize_score: bool = True):
13
+ super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
14
+
15
+ self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
16
+ self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
17
+ self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
18
+
19
+ self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
20
+ if global_content_bias else None
21
+
22
+ self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
23
+ self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
24
+ self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
25
+ self.normalize_score = normalize_score
26
+
27
+ self.input_size = state_size if input_size is None else input_size
28
+
29
+ print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
30
+
31
+ super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
32
+ self.reset_parameters()
33
+
34
+ def get_attention_scores(self, mask: Optional[torch.Tensor],
35
+ q_content: torch.Tensor, k_content: torch.Tensor,
36
+ q_pos: torch.Tensor,
37
+ pos_offset: int) -> torch.Tensor:
38
+
39
+ # content-content addressing
40
+ logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
41
+
42
+ # directionality. Do scaling here, less flops.
43
+ prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
44
+ fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
45
+
46
+ logits = logits * self.scale + fpos + self.s_bias
47
+
48
+ logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
49
+
50
+ logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
51
+
52
+ return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
53
+
54
+ def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
55
+ # data [batch * n_heads, len, c]
56
+ # bias [n_heads, c]
57
+ return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
58
+ if bias is not None else data
59
+
60
+ def _attention(self, mask: Optional[torch.Tensor],
61
+ q_content: torch.Tensor, k_content: torch.Tensor,
62
+ q_pos: torch.Tensor,
63
+ v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
64
+
65
+ scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
66
+
67
+ # Scores shape: [n_batch * n_heads, n_out, n_in]
68
+ return self._attention_read(mask, scores, v)
69
+
70
+ def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
71
+ pos_offset: int = 0, need_weights: bool = False):
72
+ # curr_state: [batch_size, out_len, c]
73
+ # attend_to: [batch_size, in_len, c]
74
+ batch_size, in_len = attend_to.shape[0:2]
75
+ out_len = curr_state.shape[1]
76
+
77
+ k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
78
+ q, = self.transform_data(curr_state, self.data_to_q, 1)
79
+ q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
80
+
81
+ q_content = self.add_head_specific_bias(q, self.global_content_bias)
82
+
83
+ data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
84
+ pos_offset, need_weights=need_weights)
85
+
86
+ if need_weights:
87
+ return data, scores
88
+ else:
89
+ return data
90
+
91
+ def reset_parameters(self):
92
+ torch.nn.init.xavier_uniform_(self.data_to_q.weight)
93
+ torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
94
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
95
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
96
+
97
+ if self.global_content_bias is not None:
98
+ self.global_content_bias.data.fill_(0)
99
+
100
+
101
+ class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
102
+ def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
103
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
104
+ a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
105
+
106
+ return torch.nn.init._no_grad_uniform_(tensor, -a, a)
107
+
108
+ def reset_parameters(self):
109
+ self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
110
+ self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
111
+ self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
112
+ self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
113
+
114
+ if self.global_content_bias is not None:
115
+ self.global_content_bias.data.fill_(0)
ops/direction_sensitive_geometric.py.bak ADDED
@@ -0,0 +1,115 @@
1
+ import torch
2
+ from .multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
3
+ from typing import Optional
4
+ from .geometric_attention import geometric_attention_activation
5
+ import math
6
+ from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
7
+
8
+
9
+ class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
10
+ def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
11
+ global_content_bias: bool = True, input_size: Optional[int] = None,
12
+ output_size: Optional[int] = None, normalize_score: bool = True):
13
+ super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
14
+
15
+ self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
16
+ self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
17
+ self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
18
+
19
+ self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
20
+ if global_content_bias else None
21
+
22
+ self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
23
+ self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
24
+ self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
25
+ self.normalize_score = normalize_score
26
+
27
+ self.input_size = state_size if input_size is None else input_size
28
+
29
+ print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
30
+
31
+ super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
32
+ self.reset_parameters()
33
+
34
+ def get_attention_scores(self, mask: Optional[torch.Tensor],
35
+ q_content: torch.Tensor, k_content: torch.Tensor,
36
+ q_pos: torch.Tensor,
37
+ pos_offset: int) -> torch.Tensor:
38
+
39
+ # content-content addressing
40
+ logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
41
+
42
+ # directionality. Do scaling here, less flops.
43
+ prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
44
+ fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
45
+
46
+ logits = logits * self.scale + fpos + self.s_bias
47
+
48
+ logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
49
+
50
+ logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
51
+
52
+ return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
53
+
54
+ def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
55
+ # data [batch * n_heads, len, c]
56
+ # bias [n_heads, c]
57
+ return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
58
+ if bias is not None else data
59
+
60
+ def _attention(self, mask: Optional[torch.Tensor],
61
+ q_content: torch.Tensor, k_content: torch.Tensor,
62
+ q_pos: torch.Tensor,
63
+ v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
64
+
65
+ scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
66
+
67
+ # Scores shape: [n_batch * n_heads, n_out, n_in]
68
+ return self._attention_read(mask, scores, v)
69
+
70
+ def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
71
+ pos_offset: int = 0, need_weights: bool = False):
72
+ # curr_state: [batch_size, out_len, c]
73
+ # attend_to: [batch_size, in_len, c]
74
+ batch_size, in_len = attend_to.shape[0:2]
75
+ out_len = curr_state.shape[1]
76
+
77
+ k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
78
+ q, = self.transform_data(curr_state, self.data_to_q, 1)
79
+ q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
80
+
81
+ q_content = self.add_head_specific_bias(q, self.global_content_bias)
82
+
83
+ data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
84
+ pos_offset, need_weights=need_weights)
85
+
86
+ if need_weights:
87
+ return data, scores
88
+ else:
89
+ return data
90
+
91
+ def reset_parameters(self):
92
+ torch.nn.init.xavier_uniform_(self.data_to_q.weight)
93
+ torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
94
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
95
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
96
+
97
+ if self.global_content_bias is not None:
98
+ self.global_content_bias.data.fill_(0)
99
+
100
+
101
+ class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
102
+ def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
103
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
104
+ a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
105
+
106
+ return torch.nn.init._no_grad_uniform_(tensor, -a, a)
107
+
108
+ def reset_parameters(self):
109
+ self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
110
+ self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
111
+ self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
112
+ self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
113
+
114
+ if self.global_content_bias is not None:
115
+ self.global_content_bias.data.fill_(0)
ops/forgetting_attention.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the innermost dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked value to affect result. If we
65
+ # don't do this then it could via affecting numerical precision of
66
+ # cumsum
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "grouped query attention has not been tested. You can comment this out if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of q.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
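For reference, the two cumsum lines at the end of backward implement the chain rule through the cumulative log forget gate: since log_lambda_t = sum_{s <= t} log_fgate_s, each log forget gate collects the gradients of all later cumulative values,

d log_fgate_t = sum_{s >= t} d log_lambda_s = d log_lambda_t + (sum_s d log_lambda_s - sum_{s <= t} d log_lambda_s),

which is exactly dlog_lambda + dcumsum[..., -1:] - dcumsum.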
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all
260
+ FloatTensor inputs and outputs are ordered as (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads)
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
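A usage sketch of the public API above, assuming a CUDA device with a supported compute capability and Triton installed (shapes are illustrative):

import torch
import torch.nn.functional as F

B, T, H, D = 2, 1024, 8, 64
q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16, requires_grad=True)
k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16, requires_grad=True)
v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16, requires_grad=True)
fgate_logit = torch.randn(B, T, H, device="cuda", requires_grad=True)
log_fgate = F.logsigmoid(fgate_logit.float())   # log forget gates, kept in float32

out = forgetting_attention(q, k, v, log_fgate)  # (B, T, H, D)
out.float().sum().backward()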
280
+
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ #Dot I trick: to place q in registers, it saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in m-dimension that this block may access is in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # According to the rule of causal masking, then max index in n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of index in n-dimension should never exceed the sequence length of k/v(`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # At this case, there would be illegal memory access when loading k & v tiles
400
+ # if mask_n is not applied for loading(only when `DIVISIBLE_N`` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sumexpn before applying dropout
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute delta = (Out * dOut).sum(-1), one scalar per query row
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
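In plain PyTorch terms, _bwd_preprocess above computes one scalar per query row, delta[m] = sum_k O[m, k] * dO[m, k], which both backward kernels later reload. A reference sketch (hypothetical helper, assumed shapes (Z, H, M, D)):

    import torch

    def bwd_preprocess_reference(o: torch.Tensor, do: torch.Tensor) -> torch.Tensor:
        # Matches the Delta buffer written by the kernel: shape (Z, H, M)
        return (o.float() * do.float()).sum(dim=-1)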
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv accumulators
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_N, BLOCK_M)
727
+
728
+ # load q and log_lambda_out on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since the softmax in the backward pass is pointwise, the normalizer was already saved in the forward pass.
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL)
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out positions before seq_start
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N,)
810
+
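For readers checking the math, one inner-loop iteration of _bwd_kv_kernel corresponds to this dense sketch (hypothetical helper; q and do are (BLOCK_M, D), k and v are (BLOCK_N, D), l and delta have length BLOCK_M, and the tile layout is transposed exactly as in the kernel):

    import torch

    def bwd_kv_tile_reference(q, k, v, do, l, delta, log_lambda_out, log_lambda_in, sm_scale):
        # Recompute the transposed attention tile; l is the log-normalizer saved by the forward pass
        sT = (k @ q.T) * sm_scale + (log_lambda_out[None, :] - log_lambda_in[:, None])
        pT = torch.exp(sT - l[None, :])        # (BLOCK_N, BLOCK_M)
        dv = pT @ do                           # (BLOCK_N, D)
        dpT = v @ do.T                         # (BLOCK_N, BLOCK_M)
        dsT = pT * (dpT - delta[None, :])
        dk = (dsT @ q) * sm_scale              # (BLOCK_N, D); the kernel applies sm_scale at the end
        dlog_lambda_in = -dsT.sum(dim=1)       # (BLOCK_N,)
        return dk, dv, dlog_lambda_in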
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v, and log_lambda_in on-chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since the softmax in the backward pass is pointwise, the normalizer was already saved in the forward pass.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds so that invalid positions contribute exactly zero
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
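_bwd_q_kernel above mirrors the same recomputation from the query side; the matching dense sketch (hypothetical helper, same conventions as the kv sketch):

    import torch

    def bwd_q_tile_reference(q, k, v, do, l, delta, log_lambda_out, log_lambda_in, sm_scale):
        s = (q @ k.T) * sm_scale + (log_lambda_out[:, None] - log_lambda_in[None, :])
        p = torch.exp(s - l[:, None])          # (BLOCK_M, BLOCK_N)
        dp = do @ v.T                          # (BLOCK_M, BLOCK_N)
        ds = p * (dp - delta[:, None])
        dq = (ds @ k) * sm_scale               # (BLOCK_M, D)
        dlog_lambda_out = ds.sum(dim=1)        # (BLOCK_M,), accumulated into DLOG_LAMBDA
        return dq, dlog_lambda_out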
992
+
993
+
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
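Assuming the diff's repo-relative path of ops/forgetting_attention.py, the parity test above can be invoked directly, for example:

    import pytest

    pytest.main(["-q", "ops/forgetting_attention.py", "-k", "test_op"])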
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
ops/forgetting_attention_std.py ADDED
@@ -0,0 +1,72 @@
1
+ """
2
+ Forgetting Attention - standard-softmax reference version
3
+ Append this function at the end of forgetting_attention.py
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from einops import rearrange
10
+ from typing import Optional
11
+
12
+
13
+ def forgetting_attention_std(
14
+ q: torch.Tensor,
15
+ k: torch.Tensor,
16
+ v: torch.Tensor,
17
+ log_fgate: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ ) -> torch.Tensor:
23
+ """标准 Softmax 版本的 Forgetting Attention"""
24
+
25
+ if not head_first:
26
+ q = rearrange(q, "b t h d -> b h t d")
27
+ k = rearrange(k, "b t h d -> b h t d")
28
+ v = rearrange(v, "b t h d -> b h t d")
29
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
30
+
31
+ B, H, T_q, D = q.shape
32
+ T_k = k.shape[2]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # Compute the QK scores
38
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
39
+
40
+ # Handle seq_start
41
+ log_fgate_masked = log_fgate.float()
42
+ if seq_start is not None:
43
+ log_fgate_masked = log_fgate_masked.clone()
44
+ mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]
45
+ log_fgate_masked[mask_idx] = 0.0
46
+
47
+ # Compute the cumulative decay
48
+ log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
49
+ decay_bias = log_lambda[:, :, :T_q, None] - log_lambda[:, :, None, :]
50
+ scores = scores + decay_bias
51
+
52
+ # Causal mask
53
+ P_SEQ = T_k - T_q
54
+ causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
55
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
56
+
57
+ # seq_start mask
58
+ if seq_start is not None:
59
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
60
+ scores = scores.masked_fill(seq_mask, float('-inf'))
61
+
62
+ # Softmax
63
+ attn = F.softmax(scores, dim=-1)
64
+ attn = torch.nan_to_num(attn, 0.0)
65
+
66
+ # Compute the output
67
+ out = torch.matmul(attn.to(v.dtype), v)
68
+
69
+ if not head_first:
70
+ out = rearrange(out, "b h t d -> b t h d")
71
+
72
+ return out
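A minimal usage sketch for the reference function above (shapes are illustrative; this pure-PyTorch version runs on any device):

    import torch
    import torch.nn.functional as F

    B, T, H, D = 2, 16, 4, 32
    q = torch.randn(B, T, H, D)
    k = torch.randn(B, T, H, D)
    v = torch.randn(B, T, H, D)
    log_fgate = F.logsigmoid(torch.randn(B, T, H))      # log forget gates, values in (-inf, 0]
    out = forgetting_attention_std(q, k, v, log_fgate)  # (B, T, H, D)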
ops/framework_mock.py ADDED
@@ -0,0 +1,25 @@
1
+ """
2
+ Mock framework module for the NDR geometric attention
3
+ Only the necessary parts are kept
4
+ """
5
+ import torch
6
+ from typing import Optional, Any
7
+
8
+ class visualize:
9
+ """Mock visualize class"""
10
+ @staticmethod
11
+ def attention(*args, **kwargs):
12
+ """Dummy attention visualization"""
13
+ pass
14
+
15
+ @staticmethod
16
+ def plot(*args, **kwargs):
17
+ """Dummy plot"""
18
+ pass
19
+
20
+ # Mock other functionality that may be needed
21
+ def get_logger(name: str):
22
+ """Mock logger"""
23
+ import logging
24
+ return logging.getLogger(name)
25
+
ops/geometric_attention/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .cuda_interface import geometric_attention_activation
ops/geometric_attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes).
 
ops/geometric_attention/__pycache__/cuda_interface.cpython-310.pyc ADDED
Binary file (3.45 kB).
 
ops/geometric_attention/cuda_interface.cu ADDED
@@ -0,0 +1,177 @@
1
+ #include <torch/extension.h>
2
+
3
+ __global__ void k_cuda_log_sigmoid_forward(int N, float * t, float *out_sigm, float *out_one_minus_sigm){
4
+ int i = threadIdx.x + blockIdx.x * blockDim.x;
5
+ if (i<N){
6
+ float x = t[i];
7
+ float c = - log(exp(-abs(x)) + 1);
8
+ out_sigm[i] = min(x, 0.0f) + c;
9
+ out_one_minus_sigm[i] = -max(x, 0.0f) + c;
10
+ }
11
+ }
12
+
13
+ __global__ void k_cuda_log_sigmoid_backward(int N, float *t, float *grad_sigm, float *grad_one_minus_sigm, float *grad_out){
14
+ int i = threadIdx.x + blockIdx.x * blockDim.x;
15
+ if (i<N){
16
+ float x = t[i];
17
+ float ne = exp(-abs(x));
18
+ float coeff = 1.0 / (ne + 1.0) * ne;
19
+
20
+ float r_one_minus = (x > 0) ? (coeff - 1) : (-coeff);
21
+ float r = (x < 0) ? (coeff - 1) : (-coeff);
22
+ grad_out[i] = - grad_sigm[i] * r + grad_one_minus_sigm[i] * r_one_minus;
23
+ }
24
+ }
25
+
26
+ std::vector<torch::Tensor> cuda_log_sigmoid_forward(torch::Tensor input){
27
+ auto o1 = torch::empty_like(input);
28
+ auto o2 = torch::empty_like(input);
29
+ auto inf = input.flatten();
30
+
31
+ const int N = inf.size(0);
32
+
33
+ const int threads = 256;
34
+ const int blocks = (N + threads - 1) / threads;
35
+
36
+ k_cuda_log_sigmoid_forward<<<blocks, threads>>>(N,
37
+ input.data<float>(),
38
+ o1.data<float>(),
39
+ o2.data<float>());
40
+
41
+ return {o1, o2};
42
+ }
43
+
44
+ std::vector<torch::Tensor> cuda_log_sigmoid_backward(torch::Tensor input, torch::Tensor grad_sigm, torch::Tensor grad_one_minus_sigm){
45
+ auto output = torch::empty_like(input);
46
+ auto N = input.flatten().size(0);
47
+
48
+ const int threads = 256;
49
+ const int blocks = (N + threads - 1) / threads;
50
+
51
+ k_cuda_log_sigmoid_backward<<<blocks, threads>>>(N,
52
+ input.data<float>(),
53
+ grad_sigm.data<float>(),
54
+ grad_one_minus_sigm.data<float>(),
55
+ output.data<float>());
56
+
57
+ return {output};
58
+ }
59
+
60
+
61
+ typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor;
62
+
63
+ __global__ void k_cuda_window_sum_forward(float_accessor csum, float_accessor out, int offset){
64
+ const int in_p = threadIdx.z + blockIdx.z * blockDim.z;
65
+ const int out_p_mem = threadIdx.y + blockIdx.y * blockDim.y;
66
+ const int batch = threadIdx.x + blockIdx.x * blockDim.x;
67
+
68
+ const int out_p = out_p_mem + offset;
69
+
70
+ if (batch < out.size(0) & out_p_mem < out.size(1) & in_p < out.size(2)){
71
+ float res;
72
+ if (in_p == out_p){
73
+ res = 0;
74
+ } else {
75
+ const int offset = abs(out_p - in_p);
76
+ int p_i = out_p + offset - int(in_p > out_p);
77
+ const int n_i = out_p - offset;
78
+
79
+ p_i = min(p_i, out.size(2) - 1);
80
+
81
+ float d_n = (n_i >= 0) ? (csum[batch][out_p_mem][n_i]) : 0.0;
82
+ res = (csum[batch][out_p_mem][p_i]) - d_n;
83
+ }
84
+
85
+ out[batch][out_p_mem][in_p] = res;
86
+ }
87
+
88
+ }
89
+
90
+ __global__ void k_cuda_window_sum_backward(float_accessor grad_in, float_accessor grad_out, int offset){
91
+ const int in_p = threadIdx.z + blockIdx.z * blockDim.z;
92
+ const int out_p_mem = threadIdx.y + blockIdx.y * blockDim.y;
93
+ const int batch = threadIdx.x + blockIdx.x * blockDim.x;
94
+
95
+ const int out_p = out_p_mem + offset;
96
+
97
+ if (batch < grad_out.size(0) & out_p_mem < grad_out.size(1) & in_p < grad_out.size(2)){
98
+ const int other = 2 * out_p - in_p;
99
+
100
+ float res;
101
+ if (in_p == grad_out.size(2) - 1){
102
+ res = 0;
103
+ for (int i = 0; i < other + int(in_p != out_p); ++i){
104
+ res += grad_in[batch][out_p_mem][i];
105
+ }
106
+ } else if (in_p == out_p){
107
+ res = grad_in[batch][out_p_mem][min(in_p + 1, grad_out.size(2) - 1)];
108
+ } else if (in_p < out_p){
109
+ res = -grad_in[batch][out_p_mem][in_p];
110
+ if (other < grad_in.size(2))
111
+ res -= grad_in[batch][out_p_mem][other];
112
+ } else {
113
+ res = grad_in[batch][out_p_mem][in_p + 1];
114
+ if (other >= 0)
115
+ res += grad_in[batch][out_p_mem][other];
116
+ }
117
+
118
+ grad_out[batch][out_p_mem][in_p] = res;
119
+ }
120
+ }
121
+
122
+ dim3 get_grid_size(torch::Tensor target, dim3 block_dim){
123
+ return dim3(
124
+ (target.size(0) + block_dim.x - 1) / block_dim.x,
125
+ (target.size(1) + block_dim.y - 1) / block_dim.y,
126
+ (target.size(2) + block_dim.z - 1) / block_dim.z
127
+ );
128
+ }
129
+
130
+ torch::Tensor cuda_window_sum_forward(torch::Tensor input, int offset){
131
+ auto out = torch::empty_like(input);
132
+
133
+ dim3 block_size(2, 2, 32);
134
+ k_cuda_window_sum_forward<<<get_grid_size(input, block_size), block_size>>>(
135
+ input.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
136
+ out.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
137
+ offset
138
+ );
139
+
140
+ return out;
141
+ }
142
+
143
+ torch::Tensor cuda_window_sum_backward(torch::Tensor grad_in, int offset){
144
+ auto out = torch::empty_like(grad_in);
145
+
146
+ dim3 block_size(2, 2, 32);
147
+ k_cuda_window_sum_backward<<<get_grid_size(grad_in, block_size), block_size>>>(
148
+ grad_in.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
149
+ out.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
150
+ offset
151
+ );
152
+
153
+ return out;
154
+ }
155
+
156
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
157
+ m.def(
158
+ "cuda_log_sigmoid_forward",
159
+ &cuda_log_sigmoid_forward,
160
+ "Log sigmoid, forward pass"
161
+ );
162
+ m.def(
163
+ "cuda_log_sigmoid_backward",
164
+ &cuda_log_sigmoid_backward,
165
+ "Log sigmoid, backward pass"
166
+ );
167
+ m.def(
168
+ "cuda_window_sum_forward",
169
+ &cuda_window_sum_forward,
170
+ "Window sum, forward pass"
171
+ );
172
+ m.def(
173
+ "cuda_window_sum_backward",
174
+ &cuda_window_sum_backward,
175
+ "Window sum, backward pass"
176
+ );
177
+ }
ops/geometric_attention/cuda_interface.py ADDED
@@ -0,0 +1,93 @@
1
+ import os
2
+ import torch
3
+ import multiprocessing
4
+ from typing import Tuple, Optional
5
+ import torch.nn.functional as F
6
+ import filelock  # used in place of framework.utils.LockFile
7
+
8
+ # Just in time import
9
+ # https://pytorch.org/tutorials/advanced/cpp_extension
10
+
11
+ dirname = os.path.dirname(__file__)
12
+ filename = os.path.join(dirname, 'cuda_interface.cu')
13
+ outdir = "./cache/geometric_attention"
14
+ os.makedirs(outdir, exist_ok=True)
15
+
16
+ cuda_log_sigmoid_backward = None
17
+ cuda_log_sigmoid_forward = None
18
+ cuda_window_sum_forward = None
19
+ cuda_window_sum_backward = None
20
+
21
+ def load_extension():
22
+ global cuda_log_sigmoid_forward, cuda_log_sigmoid_backward
23
+ global cuda_window_sum_forward, cuda_window_sum_backward
24
+ if cuda_log_sigmoid_forward is not None:
25
+ return
26
+
27
+ # Use filelock in place of framework.utils.LockFile
28
+ lock = filelock.FileLock(outdir + "/lock.lock")
29
+ with lock:
30
+ from torch.utils.cpp_extension import load
31
+
32
+ os.environ["MAX_JOBS"] = str(multiprocessing.cpu_count())
33
+ ext = load(
34
+ extra_cuda_cflags=['--ftemplate-depth=1024'],
35
+ name="geometric_attention_cuda_interface",
36
+ sources=[filename], verbose=True)
37
+
38
+ cuda_log_sigmoid_forward = ext.cuda_log_sigmoid_forward
39
+ cuda_log_sigmoid_backward = ext.cuda_log_sigmoid_backward
40
+ cuda_window_sum_forward = ext.cuda_window_sum_forward
41
+ cuda_window_sum_backward = ext.cuda_window_sum_backward
42
+
43
+
44
+ class LogSigmoidFunction(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
47
+ x = x.detach().contiguous()
48
+ ctx.save_for_backward(x)
49
+ a, b = cuda_log_sigmoid_forward(x)
50
+ return a, b
51
+
52
+ @staticmethod
53
+ def backward(ctx, grad_in_sigm: torch.Tensor, grad_in_one_minus: torch.Tensor) -> torch.Tensor:
54
+ xf, = ctx.saved_tensors
55
+ ga = grad_in_sigm.contiguous()
56
+ gb = grad_in_one_minus.contiguous()
57
+ return cuda_log_sigmoid_backward(xf, ga, gb)[0]
58
+
59
+
60
+ class WindowSumFunction(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward(ctx, csum: torch.Tensor, offset: int) -> torch.Tensor:
63
+ ctx.saved_offset = offset
64
+ c2 = csum.detach().contiguous().flatten(end_dim=-3)
65
+ res = cuda_window_sum_forward(c2, offset)
66
+ return res.view_as(csum)
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
70
+ offset = ctx.saved_offset
71
+ go = grad_output.contiguous().flatten(end_dim=-3)
72
+ res = cuda_window_sum_backward(go, offset)
73
+ return res.view_as(grad_output), None
74
+
75
+
76
+ def window_sum(x: torch.Tensor, offset: int) -> torch.Tensor:
77
+ load_extension()
78
+ return WindowSumFunction.apply(x, offset)
79
+
80
+
81
+ def log_sigmoid(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
82
+ load_extension()
83
+ return LogSigmoidFunction.apply(x)
84
+
85
+
86
+ def geometric_attention_activation(logits: torch.Tensor, mask: Optional[torch.Tensor] = None, pos_offset: int = 0,
87
+ normalize: bool = True) -> torch.Tensor:
88
+ p, one_minus_p = log_sigmoid(logits)
89
+ not_previous = window_sum(one_minus_p.cumsum(-1), pos_offset)
90
+
91
+ probs = (not_previous + p).exp()
92
+
93
+ return F.normalize(probs, 1, -1) if normalize else probs
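A small usage sketch (the extension is JIT-compiled on first use, so the first call is slow; contiguous float32 CUDA tensors are assumed, matching the kernels above):

    import torch

    logits = torch.randn(2, 128, 128, device="cuda", dtype=torch.float32)
    probs = geometric_attention_activation(logits)  # same shape; rows L1-normalized by default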
ops/geometric_attention/cuda_interface.py.bak ADDED
@@ -0,0 +1,94 @@
1
+ import os
2
+ import torch
3
+ import multiprocessing
4
+ from framework.utils import LockFile
5
+ from typing import Tuple, Optional
6
+ import torch.nn.functional as F
7
+
8
+ # Just in time import
9
+ # https://pytorch.org/tutorials/advanced/cpp_extens
10
+
11
+ dirname = os.path.dirname(__file__)
12
+ filename = os.path.join(dirname, 'cuda_interface.cu')
13
+ outdir = "./cache/geometric_attention"
14
+ os.makedirs(outdir, exist_ok=True)
15
+
16
+ cuda_log_sigmoid_backward = None
17
+ cuda_log_sigmoid_forward = None
18
+ cuda_window_sum_forward = None
19
+ cuda_window_sum_backward = None
20
+
21
+ def load_extension():
22
+ global cuda_log_sigmoid_forward, cuda_log_sigmoid_backward
23
+ global cuda_window_sum_forward, cuda_window_sum_backward
24
+ if cuda_log_sigmoid_forward is not None:
25
+ return
26
+
27
+ with LockFile(outdir + "/lock"):
28
+ from torch.utils.cpp_extension import load
29
+
30
+ os.environ["MAX_JOBS"] = str(multiprocessing.cpu_count())
31
+ ext = load(
32
+ extra_cuda_cflags=['--ftemplate-depth=1024'],
33
+ name="geometric_attention_cuda_interface",
34
+ sources=[filename], verbose=True)
35
+ #, build_directory=outdir)
36
+
37
+ cuda_log_sigmoid_forward = ext.cuda_log_sigmoid_forward
38
+ cuda_log_sigmoid_backward = ext.cuda_log_sigmoid_backward
39
+ cuda_window_sum_forward = ext.cuda_window_sum_forward
40
+ cuda_window_sum_backward = ext.cuda_window_sum_backward
41
+
42
+
43
+ class LogSigmoidFunction(torch.autograd.Function):
44
+ @staticmethod
45
+ def forward(ctx, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
46
+ x = x.detach().contiguous()
47
+ ctx.save_for_backward(x)
48
+ a, b = cuda_log_sigmoid_forward(x)
49
+ return a, b
50
+ # return res_a.view_as(x), res_b.view_as(x)
51
+
52
+ @staticmethod
53
+ def backward(ctx, grad_in_sigm: torch.Tensor, grad_in_one_minus: torch.tensor) -> torch.Tensor:
54
+ xf, = ctx.saved_tensors
55
+ ga = grad_in_sigm.contiguous()
56
+ gb = grad_in_one_minus.contiguous()
57
+ return cuda_log_sigmoid_backward(xf, ga, gb)[0]
58
+
59
+
60
+ class WindowSumFunction(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward(ctx, csum: torch.Tensor, offset: int) -> torch.Tensor:
63
+ ctx.saved_offset = offset
64
+ c2 = csum.detach().contiguous().flatten(end_dim=-3)
65
+ res = cuda_window_sum_forward(c2, offset)
66
+ return res.view_as(csum)
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
70
+ offset = ctx.saved_offset
71
+ go = grad_output.contiguous().flatten(end_dim=-3)
72
+ res = cuda_window_sum_backward(go, offset)
73
+ return res.view_as(grad_output), None
74
+
75
+
76
+ def window_sum(x: torch.Tensor, offset: int) -> torch.Tensor:
77
+ load_extension()
78
+ return WindowSumFunction.apply(x, offset)
79
+
80
+
81
+ def log_sigmoid(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
82
+ load_extension()
83
+ return LogSigmoidFunction.apply(x)
84
+
85
+
86
+ def geometric_attention_activation(logits: torch.Tensor, mask: Optional[torch.Tensor] = None, pos_offset: int = 0,
87
+ normalize: bool = True) -> torch.Tensor:
88
+ p, one_minus_p = log_sigmoid(logits)
89
+ not_previos = window_sum(one_minus_p.cumsum(-1), pos_offset)
90
+
91
+ probs = (not_previos + p).exp()
92
+
93
+ # return probs
94
+ return F.normalize(probs, 1, -1) if normalize else probs
ops/geometric_attention_final.py ADDED
@@ -0,0 +1,109 @@
1
+ """
2
+ Geometric Attention - CUDA-accelerated version (with FP16 support)
3
+ """
4
+
5
+ import math
6
+ import torch
7
+ from einops import rearrange
8
+ from typing import Optional
9
+
10
+ # Try to import the CUDA version
11
+ try:
12
+ from forgetting_transformer.ops.geometric_attention.cuda_interface import (
13
+ load_extension,
14
+ geometric_attention_activation,
15
+ )
16
+ load_extension()
17
+ HAS_CUDA = True
18
+ print("✅ Using CUDA geometric attention (with FP16 support)")
19
+ except Exception as e:
20
+ HAS_CUDA = False
21
+ print(f"⚠️ CUDA not available: {e}")
22
+
23
+
24
+ def geometric_attention_cuda(
25
+ q: torch.Tensor,
26
+ k: torch.Tensor,
27
+ v: torch.Tensor,
28
+ *,
29
+ head_first: bool = False,
30
+ seq_start: Optional[torch.Tensor] = None,
31
+ sm_scale: Optional[float] = None,
32
+ normalize: bool = True,
33
+ ) -> torch.Tensor:
34
+ if not HAS_CUDA:
35
+ raise RuntimeError("CUDA not available")
36
+
37
+ # Save the original dtype
38
+ original_dtype = q.dtype
39
+ needs_cast = original_dtype == torch.float16
40
+
41
+ # If the inputs are FP16, cast to FP32 (the CUDA kernel expects FP32)
42
+ if needs_cast:
43
+ q = q.float()
44
+ k = k.float()
45
+ v = v.float()
46
+
47
+ # Rearrange
48
+ if not head_first:
49
+ q = rearrange(q, "b t h d -> b h t d")
50
+ k = rearrange(k, "b t h d -> b h t d")
51
+ v = rearrange(v, "b t h d -> b h t d")
52
+
53
+ B, H, T_q, D = q.shape
54
+
55
+ if sm_scale is None:
56
+ sm_scale = 1.0 / math.sqrt(D)
57
+
58
+ # Attention scores
59
+ logits = torch.matmul(q, k.transpose(-2, -1)) * sm_scale
60
+
61
+ # CUDA kernel (FP32)
62
+ attn_weights = geometric_attention_activation(
63
+ logits, mask=None, pos_offset=0, normalize=normalize
64
+ )
65
+
66
+ # Apply to values
67
+ output = torch.matmul(attn_weights, v)
68
+
69
+ # Rearrange back
70
+ if not head_first:
71
+ output = rearrange(output, "b h t d -> b t h d")
72
+
73
+ # Cast back to the original dtype
74
+ if needs_cast:
75
+ output = output.to(original_dtype)
76
+
77
+ return output
78
+
79
+
80
+ def geometric_attention(
81
+ q: torch.Tensor,
82
+ k: torch.Tensor,
83
+ v: torch.Tensor,
84
+ *,
85
+ head_first: bool = False,
86
+ seq_start: Optional[torch.Tensor] = None,
87
+ sm_scale: Optional[float] = None,
88
+ normalize: bool = True,
89
+ ) -> torch.Tensor:
90
+ """自动选择CUDA或Python"""
91
+
92
+ if HAS_CUDA and q.is_cuda:
93
+ try:
94
+ return geometric_attention_cuda(
95
+ q, k, v, head_first=head_first,
96
+ seq_start=seq_start, sm_scale=sm_scale,
97
+ normalize=normalize
98
+ )
99
+ except Exception as e:
100
+ # Do not print a warning here; it would flood the logs
101
+ pass
102
+
103
+ # Fallback
104
+ from forgetting_transformer.ops.geometric_attention_std import geometric_attention_std
105
+ return geometric_attention_std(
106
+ q, k, v, head_first=head_first,
107
+ seq_start=seq_start, sm_scale=sm_scale,
108
+ normalize=normalize
109
+ )
ops/geometric_attention_std.py ADDED
@@ -0,0 +1,179 @@
1
+ """
2
+ Geometric Attention - standard-softmax version
3
+ Based on the paper "The Neural Data Router" (Csordás et al., 2022)
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def geometric_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ normalize: bool = True,
23
+ ) -> torch.Tensor:
24
+ """
25
+ Standard-softmax version of Geometric Attention
26
+
27
+ Args:
28
+ q: Query tensor [B, T, H, D] or [B, H, T, D] if head_first
29
+ k: Key tensor [B, T, H, D] or [B, H, T, D] if head_first
30
+ v: Value tensor [B, T, H, D] or [B, H, T, D] if head_first
31
+ head_first: whether the head dimension comes first
32
+ seq_start: sequence start positions [B]
33
+ sm_scale: scaling factor, defaults to 1/sqrt(D)
34
+ normalize: whether to L1-normalize the attention weights
35
+
36
+ Returns:
37
+ output: [B, T, H, D] or [B, H, T, D] if head_first
38
+ """
39
+
40
+ # Rearrange to head_first format
41
+ if not head_first:
42
+ q = rearrange(q, "b t h d -> b h t d")
43
+ k = rearrange(k, "b t h d -> b h t d")
44
+ v = rearrange(v, "b t h d -> b h t d")
45
+
46
+ B, H, T_q, D = q.shape
47
+ T_k = k.shape[2]
48
+
49
+ if sm_scale is None:
50
+ sm_scale = 1.0 / math.sqrt(D)
51
+
52
+ # Step 1: compute content-based logits
53
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
54
+ # logits: [B, H, T_q, T_k]
55
+
56
+ # Step 2: mask the diagonal (a position may not attend to itself)
57
+ if T_q == T_k:
58
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
59
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
60
+
61
+ # Step 3: apply the seq_start mask
62
+ if seq_start is not None:
63
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
64
+ logits = logits.masked_fill(seq_mask, float('-inf'))
65
+
66
+ # Step 4: causal mask (if needed)
67
+ # NOTE: the geometric attention paper does not use a causal mask; uncomment if your task needs one
68
+ # P_SEQ = T_k - T_q
69
+ # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
70
+ # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
71
+
72
+ # Step 5: geometric weighting (the core algorithm)
73
+ attn_weights = geometric_weighting(logits, normalize=normalize)
74
+
75
+ # Step 6: apply the attention weights to the values
76
+ out = torch.matmul(attn_weights.to(v.dtype), v)
77
+
78
+ if not head_first:
79
+ out = rearrange(out, "b h t d -> b t h d")
80
+
81
+ return out
82
+
83
+
84
+ def geometric_weighting(
85
+ logits: torch.Tensor,
86
+ normalize: bool = True,
87
+ ) -> torch.Tensor:
88
+ """
89
+ Compute the geometric attention weights.
90
+
91
+ Implements Equation 7 of the paper:
92
+ A[i,j] = P[i,j] * ∏(1 - P[i,k]) for k closer to i than j
93
+
94
+ Args:
95
+ logits: [B, H, T_q, T_k] attention logits
96
+ normalize: whether to L1-normalize the weights
97
+
98
+ Returns:
99
+ weights: [B, H, T_q, T_k] attention weights
100
+ """
101
+ B, H, T_q, T_k = logits.shape
102
+
103
+ # Step 1: Sigmoid to get matching probabilities
104
+ P = torch.sigmoid(logits) # [B, H, T_q, T_k]
105
+
106
+ # Step 2: work in log-space for numerical stability
107
+ log_P = torch.log(P + 1e-10)
108
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
109
+
110
+ # Step 3: simplified version - implement the geometric weighting with a cumsum
111
+ # This is an efficient approximation that avoids explicit loops
112
+
113
+ # For each position i, accumulate log(1 - P) over all positions to its left
114
+ log_decay_left = log_one_minus_P.cumsum(dim=-1)
115
+
116
+ # Compute the weights (simplified version)
117
+ # The exact version selects the interval by distance; this is an efficient approximation
118
+ weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
119
+
120
+ # Special-case the first position (it has no elements to its left)
121
+ # Avoid in-place operations
122
+ weights_first = P[:, :, :, :1] # take the first column
123
+ weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
124
+
125
+ # Step 4: normalization (optional)
126
+ if normalize:
127
+ weights = F.normalize(weights, p=1, dim=-1)
128
+
129
+ # Handle NaNs (when all positions are -inf)
130
+ weights = torch.nan_to_num(weights, 0.0)
131
+
132
+ return weights
133
+
134
+
135
+ def geometric_weighting_full(
136
+ logits: torch.Tensor,
137
+ normalize: bool = True,
138
+ ) -> torch.Tensor:
139
+ """
140
+ Exact geometric weighting (slower but more accurate)
141
+
142
+ Use only when maximum accuracy is required; the simplified version above is recommended for training
143
+ """
144
+ B, H, T_q, T_k = logits.shape
145
+ device = logits.device
146
+
147
+ P = torch.sigmoid(logits)
148
+ log_P = torch.log(P + 1e-10)
149
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
150
+
151
+ # Initialize the weights
152
+ weights = torch.zeros_like(P)
153
+
154
+ # Compute the geometric weight for each (i, j)
155
+ for i in range(T_q):
156
+ for j in range(T_k):
157
+ # Find all positions k that are closer to i than j is
158
+ if i < j:
159
+ # looking right: closer positions are [i+1, ..., j-1]
160
+ closer_positions = range(i + 1, j)
161
+ elif i > j:
162
+ # looking left: closer positions are [j+1, ..., i-1]
163
+ closer_positions = range(j + 1, i)
164
+ else:
165
+ # i == j (the diagonal) was already masked out above
166
+ continue
167
+
168
+ # Compute ∏(1 - P[i,k]) in log-space
169
+ log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
170
+
171
+ # weights[i,j] = P[i,j] * ∏(1 - P[i,k])
172
+ weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
173
+
174
+ if normalize:
175
+ weights = F.normalize(weights, p=1, dim=-1)
176
+
177
+ weights = torch.nan_to_num(weights, 0.0)
178
+
179
+ return weights
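A quick sanity check contrasting the simplified cumsum approximation with the exact loop version (expect some discrepancy, e.g. on the diagonal, which only the full version skips):

    import torch

    logits = torch.randn(1, 2, 8, 8)
    w_fast = geometric_weighting(logits)
    w_full = geometric_weighting_full(logits)
    print((w_fast - w_full).abs().max())  # approximation error of the simplified version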
ops/layer_with_visualization.py ADDED
@@ -0,0 +1,43 @@
1
+ import torch
2
+ import torch.nn
3
+ from typing import Dict, Any
4
+
5
+
6
+ class LayerWithVisualization(torch.nn.Module):
7
+ def __init__(self):
8
+ super().__init__()
9
+ self.visualization_enabled = False
10
+
11
+ def prepare(self):
12
+ # Should be called before the training step
13
+ pass
14
+
15
+ def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
16
+ raise NotImplementedError()
17
+
18
+
19
+ class LayerVisualizer:
20
+ def __init__(self, module: torch.nn.Module, options: Dict[str, Any] = {}):
21
+ self.modules = []
22
+ self.options = options
23
+ self.curr_options = None
24
+ for n, m in module.named_modules():
25
+ if isinstance(m, LayerWithVisualization):
26
+ self.modules.append((n, m))
27
+
28
+ def plot(self) -> Dict[str, Any]:
29
+ res = {}
30
+ for n, m in self.modules:
31
+ res.update({f"{n}/{k}": v for k, v in m.plot(self.curr_options).items()})
32
+ m.visualization_enabled = False
33
+
34
+ self.curr_options = None
35
+ return res
36
+
37
+ def prepare(self, options: Dict[str, Any] = {}):
38
+ self.curr_options = self.options.copy()
39
+ self.curr_options.update(options)
40
+
41
+ for _, m in self.modules:
42
+ m.prepare()
43
+ m.visualization_enabled = True
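A usage sketch for the pair above: subclass LayerWithVisualization, record what you need during forward() while visualization_enabled is set, and return it from plot(). ProbeLayer is a hypothetical example module:

    import torch
    from typing import Dict, Any

    class ProbeLayer(LayerWithVisualization):
        def __init__(self):
            super().__init__()
            self.last_input = None

        def forward(self, x):
            if self.visualization_enabled:
                self.last_input = x.detach()  # record only while visualization is on
            return x

        def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
            return {"input_mean": self.last_input.mean().item()}

    model = torch.nn.Sequential(ProbeLayer(), torch.nn.Linear(16, 16))
    viz = LayerVisualizer(model)
    viz.prepare()              # enables visualization on every LayerWithVisualization module
    model(torch.randn(2, 16))
    print(viz.plot())          # e.g. {"0/input_mean": ...}; plot() also disables visualization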