msj19 committed on
Commit 2efa03f · verified · 1 Parent(s): 2b1ea17

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. fla2/layers/__pycache__/attn.cpython-39.pyc +0 -0
  2. fla2/layers/__pycache__/based.cpython-312.pyc +0 -0
  3. fla2/layers/__pycache__/based.cpython-39.pyc +0 -0
  4. fla2/layers/__pycache__/delta_net.cpython-38.pyc +0 -0
  5. fla2/layers/__pycache__/delta_net.cpython-39.pyc +0 -0
  6. fla2/layers/__pycache__/emdeltanet.cpython-310.pyc +0 -0
  7. fla2/layers/__pycache__/emla.cpython-310.pyc +0 -0
  8. fla2/layers/__pycache__/emla.cpython-38.pyc +0 -0
  9. fla2/layers/__pycache__/gla.cpython-38.pyc +0 -0
  10. fla2/layers/__pycache__/gsa.cpython-312.pyc +0 -0
  11. fla2/layers/__pycache__/gsa.cpython-38.pyc +0 -0
  12. fla2/layers/__pycache__/hgrn.cpython-39.pyc +0 -0
  13. fla2/layers/__pycache__/hgrn2.cpython-39.pyc +0 -0
  14. fla2/layers/__pycache__/mask_deltanet.cpython-312.pyc +0 -0
  15. fla2/layers/__pycache__/mask_gdn.cpython-312.pyc +0 -0
  16. fla2/layers/__pycache__/multiscale_retention.cpython-312.pyc +0 -0
  17. fla2/layers/__pycache__/rebased.cpython-312.pyc +0 -0
  18. fla2/layers/__pycache__/rwkv6.cpython-312.pyc +0 -0
  19. fla2/layers/__pycache__/rwkv6.cpython-38.pyc +0 -0
  20. fla2/layers/__pycache__/rwkv6.cpython-39.pyc +0 -0
  21. fla2/models/abc/__pycache__/configuration_abc.cpython-312.pyc +0 -0
  22. fla2/models/abc/__pycache__/configuration_abc.cpython-38.pyc +0 -0
  23. fla2/models/abc/__pycache__/modeling_abc.cpython-39.pyc +0 -0
  24. fla2/models/delta_net/__pycache__/__init__.cpython-38.pyc +0 -0
  25. fla2/models/delta_net/__pycache__/configuration_delta_net.cpython-39.pyc +0 -0
  26. fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-312.pyc +0 -0
  27. fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-38.pyc +0 -0
  28. fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-39.pyc +0 -0
  29. fla2/models/emdeltanet/__init__.py +12 -0
  30. fla2/models/emdeltanet/__pycache__/__init__.cpython-312.pyc +0 -0
  31. fla2/models/emdeltanet/__pycache__/configuration_emdeltanet.cpython-310.pyc +0 -0
  32. fla2/models/emdeltanet/__pycache__/configuration_emdeltanet.cpython-312.pyc +0 -0
  33. fla2/models/emdeltanet/__pycache__/configuration_emgla.cpython-310.pyc +0 -0
  34. fla2/models/emdeltanet/__pycache__/modeling_emdeltanet.cpython-310.pyc +0 -0
  35. fla2/models/emdeltanet/__pycache__/modeling_emdeltanet.cpython-312.pyc +0 -0
  36. fla2/models/emdeltanet/__pycache__/modeling_emgla.cpython-310.pyc +0 -0
  37. fla2/models/emdeltanet/modeling_emdeltanet.py +535 -0
  38. fla2/models/emgla-noaux/__init__.py +12 -0
  39. fla2/models/emgla-noaux/__pycache__/__init__.cpython-310.pyc +0 -0
  40. fla2/models/emgla-noaux/__pycache__/configuration_emgla.cpython-310.pyc +0 -0
  41. fla2/models/emgla-noaux/__pycache__/modeling_emgla.cpython-310.pyc +0 -0
  42. fla2/models/emgla-noaux/configuration_emgla.py +95 -0
  43. fla2/models/emgla-noaux/modeling_emgla.py +414 -0
  44. fla2/models/emgla/__init__.py +12 -0
  45. fla2/models/emgla/__pycache__/__init__.cpython-310.pyc +0 -0
  46. fla2/models/emgla/__pycache__/__init__.cpython-312.pyc +0 -0
  47. fla2/models/emgla/__pycache__/configuration_emgla.cpython-310.pyc +0 -0
  48. fla2/models/emgla/__pycache__/configuration_emgla.cpython-312.pyc +0 -0
  49. fla2/models/emgla/__pycache__/modeling_emgla.cpython-310.pyc +0 -0
  50. fla2/models/emgla/__pycache__/modeling_emgla.cpython-312.pyc +0 -0
fla2/layers/__pycache__/attn.cpython-39.pyc ADDED
Binary file (4.77 kB).
 
fla2/layers/__pycache__/based.cpython-312.pyc ADDED
Binary file (7.08 kB).
 
fla2/layers/__pycache__/based.cpython-39.pyc ADDED
Binary file (3.64 kB).
 
fla2/layers/__pycache__/delta_net.cpython-38.pyc ADDED
Binary file (6.39 kB).
 
fla2/layers/__pycache__/delta_net.cpython-39.pyc ADDED
Binary file (6.37 kB).
 
fla2/layers/__pycache__/emdeltanet.cpython-310.pyc ADDED
Binary file (9.23 kB).
 
fla2/layers/__pycache__/emla.cpython-310.pyc ADDED
Binary file (7.51 kB).
 
fla2/layers/__pycache__/emla.cpython-38.pyc ADDED
Binary file (7.45 kB).
 
fla2/layers/__pycache__/gla.cpython-38.pyc ADDED
Binary file (8.53 kB).
 
fla2/layers/__pycache__/gsa.cpython-312.pyc ADDED
Binary file (13.1 kB).
 
fla2/layers/__pycache__/gsa.cpython-38.pyc ADDED
Binary file (6.64 kB).
 
fla2/layers/__pycache__/hgrn.cpython-39.pyc ADDED
Binary file (4.47 kB).
 
fla2/layers/__pycache__/hgrn2.cpython-39.pyc ADDED
Binary file (5.01 kB).
 
fla2/layers/__pycache__/mask_deltanet.cpython-312.pyc ADDED
Binary file (17.2 kB).
 
fla2/layers/__pycache__/mask_gdn.cpython-312.pyc ADDED
Binary file (17.7 kB).
 
fla2/layers/__pycache__/multiscale_retention.cpython-312.pyc ADDED
Binary file (13.5 kB).
 
fla2/layers/__pycache__/rebased.cpython-312.pyc ADDED
Binary file (8.54 kB).
 
fla2/layers/__pycache__/rwkv6.cpython-312.pyc ADDED
Binary file (14 kB).
 
fla2/layers/__pycache__/rwkv6.cpython-38.pyc ADDED
Binary file (7.85 kB).
 
fla2/layers/__pycache__/rwkv6.cpython-39.pyc ADDED
Binary file (7.83 kB).
 
fla2/models/abc/__pycache__/configuration_abc.cpython-312.pyc ADDED
Binary file (2.65 kB).
 
fla2/models/abc/__pycache__/configuration_abc.cpython-38.pyc ADDED
Binary file (1.93 kB).
 
fla2/models/abc/__pycache__/modeling_abc.cpython-39.pyc ADDED
Binary file (10.9 kB).
 
fla2/models/delta_net/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (542 Bytes).
 
fla2/models/delta_net/__pycache__/configuration_delta_net.cpython-39.pyc ADDED
Binary file (1.96 kB).
 
fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-312.pyc ADDED
Binary file (18.6 kB).
 
fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-38.pyc ADDED
Binary file (11.5 kB).
 
fla2/models/delta_net/__pycache__/modeling_delta_net.cpython-39.pyc ADDED
Binary file (11.5 kB).
 
fla2/models/emdeltanet/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # -*- coding: utf-8 -*-
+
+ from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+ from .configuration_emdeltanet import emdeltanetConfig
+ from .modeling_emdeltanet import emdeltanetForCausalLM, emdeltanetModel
+
+ AutoConfig.register(emdeltanetConfig.model_type, emdeltanetConfig)
+ AutoModel.register(emdeltanetConfig, emdeltanetModel)
+ AutoModelForCausalLM.register(emdeltanetConfig, emdeltanetForCausalLM)
+
+ __all__ = ['emdeltanetConfig', 'emdeltanetForCausalLM', 'emdeltanetModel']
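
Note: the registrations above are what make the custom model type resolvable through the standard Auto classes. A minimal usage sketch follows (assumptions, not code from this commit: the package is importable as `fla2` and the default `emdeltanetConfig` values are usable as-is):

    from transformers import AutoConfig, AutoModelForCausalLM
    import fla2.models.emdeltanet  # noqa: F401 -- importing the package runs the registrations above

    config = AutoConfig.for_model('emdeltanet')       # resolves to emdeltanetConfig
    model = AutoModelForCausalLM.from_config(config)  # resolves to emdeltanetForCausalLM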
fla2/models/emdeltanet/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (715 Bytes).
 
fla2/models/emdeltanet/__pycache__/configuration_emdeltanet.cpython-310.pyc ADDED
Binary file (2.83 kB).
 
fla2/models/emdeltanet/__pycache__/configuration_emdeltanet.cpython-312.pyc ADDED
Binary file (3.84 kB).
 
fla2/models/emdeltanet/__pycache__/configuration_emgla.cpython-310.pyc ADDED
Binary file (2.71 kB).
 
fla2/models/emdeltanet/__pycache__/modeling_emdeltanet.cpython-310.pyc ADDED
Binary file (14.7 kB).
 
fla2/models/emdeltanet/__pycache__/modeling_emdeltanet.cpython-312.pyc ADDED
Binary file (23.2 kB).
 
fla2/models/emdeltanet/__pycache__/modeling_emgla.cpython-310.pyc ADDED
Binary file (11.7 kB).
 
fla2/models/emdeltanet/modeling_emdeltanet.py ADDED
@@ -0,0 +1,535 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ import math
+ import warnings
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, Iterable
+ from einops import rearrange
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+ from transformers.utils.deprecation import deprecate_kwarg
+
+ from ...layers.attn import Attention
+ from ...layers.emdeltanet import emdeltanet
+ from ...models.emdeltanet.configuration_emdeltanet import emdeltanetConfig
+ from fla.models.utils import Cache
+ from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
+ from fla.modules import GatedMLP as emdeltanetMLP
+ from ...modules import RMSNorm
+ from ...modules import RotaryEmbedding
+ logger = logging.get_logger(__name__)
+
+ if TYPE_CHECKING:
+     from transformers.processing_utils import Unpack
+
+
+ class emdeltanetBlock(nn.Module):
+     def __init__(self, config: emdeltanetConfig, layer_idx: int):
+         super().__init__()
+
+         self.config = config
+         self.layer_idx = layer_idx
+         print(config)
+         self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+         if config.attn is not None and layer_idx in config.attn['layers']:
+             self.attn = Attention(
+                 hidden_size=config.hidden_size,
+                 num_heads=config.attn['num_heads'],
+                 num_kv_heads=config.attn['num_kv_heads'],
+                 qkv_bias=config.attn['qkv_bias'],
+                 window_size=config.attn['window_size'],
+                 rope_theta=config.attn['rope_theta'],
+                 max_position_embeddings=config.max_position_embeddings,
+                 layer_idx=layer_idx
+             )
+         else:
+             self.attn = emdeltanet(
+                 mode=config.attn_mode,
+                 hidden_size=config.hidden_size,
+                 expand_k=config.expand_k,
+                 expand_v=config.expand_v,
+                 num_heads=config.num_heads,
+                 use_gate=config.use_gate,
+                 use_short_conv=config.use_short_conv,
+                 use_output_norm=config.use_output_norm,
+                 conv_size=config.conv_size,
+                 norm_eps=config.norm_eps,
+                 ratio=config.ratio,
+                 topk=config.topk,
+                 layer_idx=layer_idx
+             )
+         self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+         self.mlp = emdeltanetMLP(
+             hidden_size=config.hidden_size,
+             hidden_ratio=config.hidden_ratio,
+             intermediate_size=config.intermediate_size,
+             hidden_act=config.hidden_act,
+             fuse_swiglu=config.fuse_swiglu
+         )
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+         **kwargs: Unpack[Dict]
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         residual = hidden_states
+         hidden_states = self.attn_norm(hidden_states)
+         hidden_states, attentions, past_key_values, router_logits = self.attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             **kwargs
+         )
+         if self.config.fuse_norm:
+             hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+         else:
+             hidden_states = residual + hidden_states
+             residual = hidden_states
+             hidden_states = self.mlp_norm(hidden_states)
+         hidden_states = self.mlp(hidden_states, **kwargs)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states, attentions, past_key_values, router_logits)
+
+         return outputs
+
+
+ class emdeltanetPreTrainedModel(PreTrainedModel):
+
+     config_class = emdeltanetConfig
+     base_model_prefix = 'model'
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['emdeltanetBlock']
+     _supports_cache_class = True
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(
+         self,
+         module: nn.Module,
+         prenorm_residual_strategy: Optional[str] = 'rescale',
+         num_residuals_per_layer: int = 2,
+     ):
+         if isinstance(module, (nn.Linear, nn.Conv1d)):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+         elif hasattr(module, 'reset_parameters'):
+             module.reset_parameters()
+
+         if prenorm_residual_strategy is not None:
+             # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+             # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+             # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+             # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+             #
+             # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+             p = None
+             if hasattr(module, 'o_proj'):
+                 p = module.o_proj.weight
+             elif hasattr(module, 'down_proj'):
+                 p = module.down_proj.weight
+             if p is not None:
+                 # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                 # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                 # We need to reinit p since this code could be called multiple times
+                 # Having just p *= scale would repeatedly scale it down
+                 if prenorm_residual_strategy == 'rescale':
+                     nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+                     with torch.no_grad():
+                         p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
+                 elif prenorm_residual_strategy == 'zero':
+                     nn.init.zeros_(p)
+                 else:
+                     raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")
+
+
+ class emdeltanetModel(emdeltanetPreTrainedModel):
+
+     def __init__(self, config: emdeltanetConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([emdeltanetBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+         self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+
+         self.gradient_checkpointing = False
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,  # noqa
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs: Unpack[Dict]
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         if output_attentions:
+             warnings.warn("`emdeltanetModel` does not support `output_attentions` for now, setting it to `False`.")
+             output_attentions = False
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         if input_ids is None and inputs_embeds is None:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embeddings(input_ids)
+         hidden_states = inputs_embeds
+
+         if use_cache and not isinstance(past_key_values, Cache):
+             past_key_values = Cache.from_legacy_cache(past_key_values)
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
+             use_cache = False
+
+         all_hidden_states = () if output_hidden_states else None
+         all_attns = () if output_attentions else None
+         all_router_logits = ()
+         for layer in self.layers:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 hidden_states, attentions, past_key_values, router_logits = self._gradient_checkpointing_func(
+                     layer.__call__,
+                     hidden_states,
+                     attention_mask,
+                     past_key_values,
+                     use_cache,
+                     output_attentions,
+                     **kwargs
+                 )
+             else:
+                 hidden_states, attentions, past_key_values, router_logits = layer(
+                     hidden_states,
+                     attention_mask=attention_mask,
+                     past_key_values=past_key_values,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                     **kwargs
+                 )
+
+             if output_attentions:
+                 all_attns += (attentions,)
+             all_router_logits += (router_logits,)
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if not return_dict:
+             return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
+         # return BaseModelOutputWithPast(
+         #     last_hidden_state=hidden_states,
+         #     past_key_values=past_key_values,
+         #     hidden_states=all_hidden_states,
+         #     attentions=all_attns
+         # )
+         return emdeltanetOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+             hidden_states=all_hidden_states,
+             attentions=all_attns,
+             router_logits=all_router_logits
+         )
+
+
+ from dataclasses import dataclass
+ @dataclass
+ class emdeltanetOutputWithPast(BaseModelOutputWithPast):
+     router_logits: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+ @dataclass
+ class emdeltanetCausalLMOutputWithPast(CausalLMOutputWithPast):
+     aux_loss: Optional[torch.FloatTensor] = None
+     router_logits: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+ def load_balancing_loss_func(
+     gate_logits: Union[torch.Tensor, Tuple],
+     num_memories: int = None,
+     top_k=2,
+     use_layer_wise_balance=False,
+ ) -> torch.FloatTensor:
+     r"""
+     Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+     See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+     function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+     experts is too unbalanced.
+
+     Args:
+         gate_logits (Union[`torch.Tensor`, Tuple[`torch.Tensor`]]):
+             Logits from the `gate`, should be a tuple of tensors. Shape: [batch_size, sequence_length, num_memories].
+         num_memories (`int`, *optional*):
+             Number of experts.
+
+     Returns:
+         The auxiliary loss.
+     """
+     if gate_logits is None or (
+         isinstance(gate_logits, Iterable) and len(gate_logits) == 0
+     ):
+         return 0
+
+     # ✨ Here is the fix for balance loss in Mixtral.
+     # We should calculate the balance loss in a layer-wise manner otherwise it may lead to degenerated solutions.
+     if use_layer_wise_balance:
+         if not isinstance(gate_logits, Iterable):
+             gate_logits = (gate_logits,)
+     else:
+         if isinstance(gate_logits, Iterable):
+             gate_logits = (torch.cat(gate_logits, dim=0),)
+         else:
+             gate_logits = (gate_logits,)
+
+     all_balance_losses = []
+
+     for logits in gate_logits:
+         if logits.dim() == 4:
+             logits = rearrange(logits, 'b h l r -> (b h) l r')
+         routing_weights, selected_experts = torch.topk(logits, top_k, dim=-1)
+         routing_weights = routing_weights.softmax(dim=-1).to(logits.dtype)
+         routing_weights_full = torch.zeros_like(logits).scatter(-1, selected_experts, routing_weights)
+
+         # cast the expert indices to int64, otherwise one-hot encoding will fail
+         if selected_experts.dtype != torch.int64:
+             selected_experts = selected_experts.to(torch.int64)
+
+         if len(selected_experts.shape) == 2:
+             selected_experts = selected_experts.unsqueeze(2)
+
+         expert_mask = torch.nn.functional.one_hot(selected_experts, num_memories)
+
+         # For a given token, determine if it was routed to a given expert.
+         expert_mask = torch.max(expert_mask, axis=-2).values
+
+         # cast to float32 otherwise mean will fail
+         expert_mask = expert_mask.to(torch.float32)
+         tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
+
+         router_prob_per_group_and_expert = torch.mean(routing_weights_full, axis=-2)
+
+         # ✨ balance loss for this layer
+         balance_loss = torch.mean(
+             tokens_per_group_and_expert * router_prob_per_group_and_expert
+         ) * (num_memories**2)
+         all_balance_losses.append(balance_loss.reshape(1))
+
+     all_balance_losses = torch.cat(all_balance_losses).mean()  # ✨
+
+     return all_balance_losses
+
+
+ class emdeltanetForCausalLM(emdeltanetPreTrainedModel, GenerationMixin):
+
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = emdeltanetModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.criterion = None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.embeddings = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     def generate(self, *args, **kwargs):
+         try:
+             return super().generate(*args, **kwargs)
+         except AttributeError as exception:
+             if 'past_key_values' in str(exception):
+                 raise AttributeError(
+                     f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
+                     f"which is not supported for {self.__class__.__name__}. "
+                     f"Try another generation strategy instead. "
+                     f"For the available generation strategies, check this doc: "
+                     f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
+                 )
+             else:
+                 raise exception
+
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.LongTensor = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         use_cache: bool = True,
+         logits_to_keep: Optional[int] = None,
+         **kwargs
+     ):
+         # only last token for `input_ids` if the `past_key_values` is not empty.
+         if past_key_values is not None and len(past_key_values) > 0:
+             input_ids = input_ids[:, -1:]
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and len(past_key_values) == 0:
+             model_inputs = {'inputs_embeds': inputs_embeds}
+         else:
+             # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+             # recompiles graphs as the stride of the inputs is a guard.
+             # Ref: https://github.com/huggingface/transformers/pull/29114
+             # TODO: use `next_tokens` directly instead.
+             model_inputs = {'input_ids': input_ids.contiguous()}
+
+         if logits_to_keep is not None:
+             model_inputs['logits_to_keep'] = logits_to_keep
+
+         model_inputs.update({
+             'past_key_values': past_key_values,
+             'use_cache': use_cache,
+             'attention_mask': attention_mask,
+         })
+         return model_inputs
+
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         logits_to_keep: Optional[int] = 0,
+         **kwargs: Unpack[Dict]
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             **kwargs
+         )
+
+         hidden_states = outputs[0]
+         fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
+
+         loss, logits = None, None
+         if not fuse_linear_and_cross_entropy or labels is None:
+             logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
+         if labels is not None:
+             if getattr(self, 'criterion', None) is None:
+                 if fuse_linear_and_cross_entropy:
+                     criterion = FusedLinearCrossEntropyLoss()
+                 elif self.config.fuse_cross_entropy:
+                     criterion = FusedCrossEntropyLoss(inplace_backward=True)
+                 else:
+                     criterion = nn.CrossEntropyLoss()
+             else:
+                 criterion = self.criterion
+             labels = labels.to(hidden_states.device)
+             labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
+             if fuse_linear_and_cross_entropy:
+                 loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
+             else:
+                 loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
+
+         valid_router_logits = tuple(
+             logits
+             for logits in (outputs.router_logits if return_dict else outputs[-1])
+             if logits is not None
+         )
+         aux_loss = load_balancing_loss_func(
+             valid_router_logits,
+             self.config.ratio,
+             self.config.topk,
+             use_layer_wise_balance=True,
+         )
+         aux_loss *= self.config.aux_loss_scale
+         # print('aux_loss:', aux_loss)
+         if loss is not None and aux_loss:
+             loss += aux_loss
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         # return CausalLMOutputWithPast(
+         #     loss=loss,
+         #     logits=logits,
+         #     past_key_values=outputs.past_key_values,
+         #     hidden_states=outputs.hidden_states,
+         #     attentions=outputs.attentions,
+         # )
+
+         return emdeltanetCausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+             router_logits=outputs.router_logits,
+             aux_loss=aux_loss
+         )
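
Note: a quick, self-contained way to sanity-check the Switch-Transformer-style balancing loss defined above is to feed it random router logits, one tensor of shape [batch, seq_len, num_memories] per layer. This is an illustrative sketch, not code from the commit, and it assumes the fla/fla2 dependencies are installed so the modeling module can be imported:

    import torch
    from fla2.models.emdeltanet.modeling_emdeltanet import load_balancing_loss_func  # assumed import path

    num_memories, top_k, num_layers = 4, 1, 24
    router_logits = tuple(torch.randn(2, 128, num_memories) for _ in range(num_layers))  # dummy shapes
    aux = load_balancing_loss_func(
        router_logits,
        num_memories=num_memories,
        top_k=top_k,
        use_layer_wise_balance=True,  # average per-layer losses, as emdeltanetForCausalLM.forward does
    )
    print(aux)  # 0-dim tensor, roughly equal to top_k when routing is near-uniform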
fla2/models/emgla-noaux/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # -*- coding: utf-8 -*-
+
+ from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+ from .configuration_emgla import emglaConfig
+ from .modeling_emgla import emglaForCausalLM, emglaModel
+
+ AutoConfig.register(emglaConfig.model_type, emglaConfig)
+ AutoModel.register(emglaConfig, emglaModel)
+ AutoModelForCausalLM.register(emglaConfig, emglaForCausalLM)
+
+ __all__ = ['emglaConfig', 'emglaForCausalLM', 'emglaModel']
fla2/models/emgla-noaux/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (520 Bytes).
 
fla2/models/emgla-noaux/__pycache__/configuration_emgla.cpython-310.pyc ADDED
Binary file (2.75 kB).
 
fla2/models/emgla-noaux/__pycache__/modeling_emgla.cpython-310.pyc ADDED
Binary file (11.7 kB).
 
fla2/models/emgla-noaux/configuration_emgla.py ADDED
@@ -0,0 +1,95 @@
+ # -*- coding: utf-8 -*-
+
+ from typing import Dict, Optional
+
+ from transformers.configuration_utils import PretrainedConfig
+
+
+
+ class emglaConfig(PretrainedConfig):
+
+     model_type = 'emgla'
+     keys_to_ignore_at_inference = ['past_key_values']
+
+     def __init__(
+         self,
+         attn_mode: str = "chunk",
+         hidden_size: int = 2048,
+         expand_k: int = 1,
+         expand_v: int = 1,
+         use_gate: bool = False,
+         use_short_conv: bool = True,
+         conv_size: int = 4,
+         use_beta: bool = True,
+         use_output_norm: bool = True,
+         num_heads: int = 16,
+         qk_norm: str = 'l2',
+         qk_activation: str = 'silu',
+         max_position_embeddings: int = 2048,
+         hidden_ratio: Optional[int] = 4,
+         intermediate_size: Optional[int] = None,
+         hidden_act: str = "swish",
+         num_hidden_layers: int = 24,
+         norm_eps: float = 1e-6,
+         attn: Optional[Dict] = None,
+         use_cache: bool = True,
+         pad_token_id: int = None,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         tie_word_embeddings: bool = False,
+         initializer_range: float = 0.02,
+         fuse_norm: bool = True,
+         fuse_swiglu: bool = True,
+         fuse_cross_entropy: bool = True,
+         vocab_size: int = 32000,
+         ratio: int = 2,
+         top_k: int = 1,
+         **kwargs
+     ):
+         self.attn_mode = attn_mode
+         self.hidden_size = hidden_size
+         self.expand_k = expand_k
+         self.expand_v = expand_v
+         self.use_gate = use_gate
+         self.use_short_conv = use_short_conv
+         self.conv_size = conv_size
+         self.use_beta = use_beta
+         self.use_output_norm = use_output_norm
+         self.num_heads = num_heads
+         self.qk_norm = qk_norm
+         self.qk_activation = qk_activation
+         self.max_position_embeddings = max_position_embeddings
+         self.topk = top_k
+         self.hidden_ratio = hidden_ratio
+         self.intermediate_size = intermediate_size
+         self.hidden_act = hidden_act
+         self.num_hidden_layers = num_hidden_layers
+         self.norm_eps = norm_eps
+         self.attn = attn
+         self.use_cache = use_cache
+         self.initializer_range = initializer_range
+         self.fuse_norm = fuse_norm
+         self.fuse_swiglu = fuse_swiglu
+         self.fuse_cross_entropy = fuse_cross_entropy
+         self.vocab_size = vocab_size
+         self.ratio = ratio
+
+         if attn is not None:
+             if not isinstance(attn, Dict):
+                 raise ValueError("attn must be a dictionary")
+             if 'layers' not in attn:
+                 raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
+             if 'num_heads' not in attn:
+                 raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
+             attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
+             attn['qkv_bias'] = attn.get('qkv_bias', False)
+             attn['window_size'] = attn.get('window_size', None)
+             attn['rope_theta'] = attn.get('rope_theta', 10000.)
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
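
Note: the validation block above requires at least 'layers' and 'num_heads' in the `attn` dict and fills the remaining keys with defaults. An illustrative construction, given the emglaConfig class defined above (values are made up, not from the commit):

    # Illustrative only: two softmax-attention layers mixed into an otherwise emgla stack.
    config = emglaConfig(
        hidden_size=1024,
        num_hidden_layers=24,
        num_heads=8,
        ratio=2,
        top_k=1,
        attn={
            'layers': [11, 23],   # which blocks use standard Attention instead of emgla
            'num_heads': 16,      # required; num_kv_heads, qkv_bias, window_size, rope_theta are filled in
        },
    )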
fla2/models/emgla-noaux/modeling_emgla.py ADDED
@@ -0,0 +1,414 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ import math
+ import warnings
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+ from transformers.utils.deprecation import deprecate_kwarg
+
+ from ...layers.attn import Attention
+ from ...layers.emgla import emgla
+ from ...models.emgla.configuration_emgla import emglaConfig
+ from fla.models.utils import Cache
+ from fla.modules import FusedCrossEntropyLoss, FusedLinearCrossEntropyLoss
+ from fla.modules import GatedMLP as emglaMLP
+ from ...modules import RMSNorm
+ from ...modules import RotaryEmbedding
+ logger = logging.get_logger(__name__)
+
+ if TYPE_CHECKING:
+     from transformers.processing_utils import Unpack
+
+
+ class emglaBlock(nn.Module):
+     def __init__(self, config: emglaConfig, layer_idx: int):
+         super().__init__()
+
+         self.config = config
+         self.layer_idx = layer_idx
+
+         self.attn_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+         if config.attn is not None and layer_idx in config.attn['layers']:
+             self.attn = Attention(
+                 hidden_size=config.hidden_size,
+                 num_heads=config.attn['num_heads'],
+                 num_kv_heads=config.attn['num_kv_heads'],
+                 qkv_bias=config.attn['qkv_bias'],
+                 window_size=config.attn['window_size'],
+                 rope_theta=config.attn['rope_theta'],
+                 max_position_embeddings=config.max_position_embeddings,
+                 layer_idx=layer_idx
+             )
+         else:
+             self.attn = emgla(
+                 mode=config.attn_mode,
+                 hidden_size=config.hidden_size,
+                 expand_k=config.expand_k,
+                 expand_v=config.expand_v,
+                 num_heads=config.num_heads,
+                 use_gate=config.use_gate,
+                 use_short_conv=config.use_short_conv,
+                 use_output_norm=config.use_output_norm,
+                 conv_size=config.conv_size,
+                 norm_eps=config.norm_eps,
+                 ratio=config.ratio,
+                 top_k=config.topk,
+                 layer_idx=layer_idx
+             )
+         self.mlp_norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+         self.mlp = emglaMLP(
+             hidden_size=config.hidden_size,
+             hidden_ratio=config.hidden_ratio,
+             intermediate_size=config.intermediate_size,
+             hidden_act=config.hidden_act,
+             fuse_swiglu=config.fuse_swiglu
+         )
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+         **kwargs: Unpack[Dict]
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         residual = hidden_states
+         hidden_states = self.attn_norm(hidden_states)
+         hidden_states, attentions, past_key_values = self.attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             **kwargs
+         )
+         if self.config.fuse_norm:
+             hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+         else:
+             hidden_states = residual + hidden_states
+             residual = hidden_states
+             hidden_states = self.mlp_norm(hidden_states)
+         hidden_states = self.mlp(hidden_states, **kwargs)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states, attentions, past_key_values)
+
+         return outputs
+
+
+ class emglaPreTrainedModel(PreTrainedModel):
+
+     config_class = emglaConfig
+     base_model_prefix = 'model'
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['emglaBlock']
+     _supports_cache_class = True
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(
+         self,
+         module: nn.Module,
+         prenorm_residual_strategy: Optional[str] = 'rescale',
+         num_residuals_per_layer: int = 2,
+     ):
+         if isinstance(module, (nn.Linear, nn.Conv1d)):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+         elif hasattr(module, 'reset_parameters'):
+             module.reset_parameters()
+
+         if prenorm_residual_strategy is not None:
+             # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+             # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+             # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+             # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+             #
+             # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+             p = None
+             if hasattr(module, 'o_proj'):
+                 p = module.o_proj.weight
+             elif hasattr(module, 'down_proj'):
+                 p = module.down_proj.weight
+             if p is not None:
+                 # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+                 # Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
+                 # We need to reinit p since this code could be called multiple times
+                 # Having just p *= scale would repeatedly scale it down
+                 if prenorm_residual_strategy == 'rescale':
+                     nn.init.kaiming_uniform_(p, a=math.sqrt(5))
+                     with torch.no_grad():
+                         p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)
+                 elif prenorm_residual_strategy == 'zero':
+                     nn.init.zeros_(p)
+                 else:
+                     raise ValueError(f"Invalid prenorm_residual_strategy: {prenorm_residual_strategy}")
+
+
+ class emglaModel(emglaPreTrainedModel):
+
+     def __init__(self, config: emglaConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([emglaBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+         self.norm = (RMSNorm if config.fuse_norm else nn.RMSNorm)(config.hidden_size, eps=config.norm_eps)
+
+         self.gradient_checkpointing = False
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,  # noqa
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs: Unpack[Dict]
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         if output_attentions:
+             warnings.warn("`emglaModel` does not support `output_attentions` for now, setting it to `False`.")
+             output_attentions = False
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         if input_ids is None and inputs_embeds is None:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embeddings(input_ids)
+         hidden_states = inputs_embeds
+
+         if use_cache and not isinstance(past_key_values, Cache):
+             past_key_values = Cache.from_legacy_cache(past_key_values)
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
+             use_cache = False
+
+         all_hidden_states = () if output_hidden_states else None
+         all_attns = () if output_attentions else None
+         for layer in self.layers:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
+                     layer.__call__,
+                     hidden_states,
+                     attention_mask,
+                     past_key_values,
+                     use_cache,
+                     output_attentions,
+                     **kwargs
+                 )
+             else:
+                 hidden_states, attentions, past_key_values = layer(
+                     hidden_states,
+                     attention_mask=attention_mask,
+                     past_key_values=past_key_values,
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                     **kwargs
+                 )
+
+             if output_attentions:
+                 all_attns += (attentions,)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if not return_dict:
+             return tuple(i for i in [hidden_states, past_key_values, all_hidden_states, all_attns] if i is not None)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+             hidden_states=all_hidden_states,
+             attentions=all_attns
+         )
+
+
+ class emglaForCausalLM(emglaPreTrainedModel, GenerationMixin):
+
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = emglaModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.criterion = None
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.embeddings = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     def generate(self, *args, **kwargs):
+         try:
+             return super().generate(*args, **kwargs)
+         except AttributeError as exception:
+             if 'past_key_values' in str(exception):
+                 raise AttributeError(
+                     f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
+                     f"which is not supported for {self.__class__.__name__}. "
+                     f"Try another generation strategy instead. "
+                     f"For the available generation strategies, check this doc: "
+                     f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
+                 )
+             else:
+                 raise exception
+
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.LongTensor = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         use_cache: bool = True,
+         logits_to_keep: Optional[int] = None,
+         **kwargs
+     ):
+         # only last token for `input_ids` if the `past_key_values` is not empty.
+         if past_key_values is not None and len(past_key_values) > 0:
+             input_ids = input_ids[:, -1:]
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and len(past_key_values) == 0:
+             model_inputs = {'inputs_embeds': inputs_embeds}
+         else:
+             # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+             # recompiles graphs as the stride of the inputs is a guard.
+             # Ref: https://github.com/huggingface/transformers/pull/29114
+             # TODO: use `next_tokens` directly instead.
+             model_inputs = {'input_ids': input_ids.contiguous()}
+
+         if logits_to_keep is not None:
+             model_inputs['logits_to_keep'] = logits_to_keep
+
+         model_inputs.update({
+             'past_key_values': past_key_values,
+             'use_cache': use_cache,
+             'attention_mask': attention_mask,
+         })
+         return model_inputs
+
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         logits_to_keep: Optional[int] = 0,
+         **kwargs: Unpack[Dict]
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             **kwargs
+         )
+
+         hidden_states = outputs[0]
+         fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
+
+         loss, logits = None, None
+         if not fuse_linear_and_cross_entropy or labels is None:
+             logits = self.lm_head(hidden_states if logits_to_keep is None else hidden_states[:, -logits_to_keep:])
+         if labels is not None:
+             if getattr(self, 'criterion', None) is None:
+                 if fuse_linear_and_cross_entropy:
+                     criterion = FusedLinearCrossEntropyLoss()
+                 elif self.config.fuse_cross_entropy:
+                     criterion = FusedCrossEntropyLoss(inplace_backward=True)
+                 else:
+                     criterion = nn.CrossEntropyLoss()
+             else:
+                 criterion = self.criterion
+             labels = labels.to(hidden_states.device)
+             labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], criterion.ignore_index)), 1)
+             if fuse_linear_and_cross_entropy:
+                 loss = criterion(hidden_states, labels, self.lm_head.weight, self.lm_head.bias)
+             else:
+                 loss = criterion(logits.view(labels.numel(), -1), labels.view(-1))
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
fla2/models/emgla/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # -*- coding: utf-8 -*-
+
+ from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+ from .configuration_emgla import emglaConfig
+ from .modeling_emgla import emglaForCausalLM, emglaModel
+
+ AutoConfig.register(emglaConfig.model_type, emglaConfig)
+ AutoModel.register(emglaConfig, emglaModel)
+ AutoModelForCausalLM.register(emglaConfig, emglaForCausalLM)
+
+ __all__ = ['emglaConfig', 'emglaForCausalLM', 'emglaModel']
fla2/models/emgla/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (515 Bytes).
 
fla2/models/emgla/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (681 Bytes).
 
fla2/models/emgla/__pycache__/configuration_emgla.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
fla2/models/emgla/__pycache__/configuration_emgla.cpython-312.pyc ADDED
Binary file (3.82 kB).
 
fla2/models/emgla/__pycache__/modeling_emgla.cpython-310.pyc ADDED
Binary file (14.5 kB).
 
fla2/models/emgla/__pycache__/modeling_emgla.cpython-312.pyc ADDED
Binary file (23.1 kB).