Abdurrahmanesc committed on
Commit 06496e9 · verified · 1 Parent(s): 06e366b

Update modeling_tinygpt.py

Files changed (1):
  1. modeling_tinygpt.py +64 -41
modeling_tinygpt.py CHANGED
@@ -1,8 +1,56 @@
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
 from torch.nn import CrossEntropyLoss
 from transformers.modeling_outputs import CausalLMOutput
 from transformers.modeling_utils import PreTrainedModel
 from configuration_tinygpt import TinyGPTConfig  # Changed from relative to absolute import
 
+# -------------------------
+# TinyGPTConfig (Required)
+# -------------------------
+class TinyGPTConfig:
+    model_type = "tinygpt"
+
+    def __init__(self,
+                 vocab_size=30522,
+                 d_model=256,
+                 n_heads=4,
+                 n_layers=4,
+                 d_ff=1024,
+                 max_seq_len=256,
+                 **kwargs):
+        self.vocab_size = vocab_size
+        self.d_model = d_model
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.d_ff = d_ff
+        self.max_seq_len = max_seq_len
+
+        # store additional HF keys
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+
+# -------------------------
+# Your Original TinyGPT Core
+# -------------------------
+class TinyGPT(nn.Module):
+    def __init__(self, vocab_size=30522, d_model=256, n_heads=4,
+                 n_layers=4, d_ff=1024, max_seq_len=256):
+        x = self.ln_f(x)
+        return self.head(x)
+
+
+class TransformerBlock(nn.Module):
+    def __init__(self, d_model, n_heads, d_ff):
+        super().__init__()
+        ff_out = self.ff(x)
+        x = self.ln2(x + ff_out)
+        return x
+
+
+# -------------------------
+# HF Wrapper: TinyGPTForCausalLM
+# -------------------------
 class TinyGPTForCausalLM(PreTrainedModel):
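Note: as committed, the TinyGPT and TransformerBlock bodies above are truncated — each __init__ defines no layers and never calls super().__init__(), then falls straight into lines that belong to a forward pass (x = self.ln_f(x), ff_out = self.ff(x)), so instantiating the model will raise at runtime. Below is a minimal sketch of a complete core consistent with the names the commit does show (ln_f, head, ff, ln2); the embed, pos_embed, attn, and ln1 names, the GELU feed-forward, and the post-norm layout are assumptions, not the author's code.

class TransformerBlock(nn.Module):
    def __init__(self, d_model, n_heads, d_ff):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)  # assumed
        self.ln1 = nn.LayerNorm(d_model)                                       # assumed
        self.ff = nn.Sequential(          # "ff" appears in the commit
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model),
        )
        self.ln2 = nn.LayerNorm(d_model)  # "ln2" appears in the commit

    def forward(self, x):
        T = x.size(1)
        # Boolean causal mask: True marks positions a token may NOT attend to.
        causal = torch.triu(torch.ones(T, T, dtype=torch.bool, device=x.device), diagonal=1)
        attn_out, _ = self.attn(x, x, x, attn_mask=causal)
        x = self.ln1(x + attn_out)
        ff_out = self.ff(x)       # from here down, the commit's visible tail
        x = self.ln2(x + ff_out)
        return x


class TinyGPT(nn.Module):
    def __init__(self, vocab_size=30522, d_model=256, n_heads=4,
                 n_layers=4, d_ff=1024, max_seq_len=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)        # assumed name
        self.pos_embed = nn.Embedding(max_seq_len, d_model)   # assumed name
        self.blocks = nn.ModuleList(
            [TransformerBlock(d_model, n_heads, d_ff) for _ in range(n_layers)]
        )
        self.ln_f = nn.LayerNorm(d_model)           # "ln_f" appears in the commit
        self.head = nn.Linear(d_model, vocab_size)  # "head" appears in the commit

    def forward(self, input_ids):
        T = input_ids.size(1)
        pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
        x = self.embed(input_ids) + self.pos_embed(pos)
        for blk in self.blocks:
            x = blk(x)
        x = self.ln_f(x)          # the commit's visible tail
        return self.head(x)

The explicit causal mask is deliberate: the deleted implementation below ran nn.TransformerEncoderLayer with no mask at all, so its attention was bidirectional despite the model being trained as a causal LM.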
 
@@ -9,22 +57,15 @@
     config_class = TinyGPTConfig
 
     def __init__(self, config):
         super().__init__(config)
 
-        self.embed = nn.Embedding(config.vocab_size, config.d_model)
-        self.pos_embed = nn.Embedding(config.max_seq_len, config.d_model)
-
-        self.blocks = nn.ModuleList([
-            nn.TransformerEncoderLayer(
-                d_model=config.d_model,
-                nhead=config.n_heads,
-                dim_feedforward=config.d_ff,
-                batch_first=True
-            )
-            for _ in range(config.n_layers)
-        ])
-
-        self.norm = nn.LayerNorm(config.d_model)
-        self.lm_head = nn.Linear(config.d_model, config.vocab_size)
+        self.model = TinyGPT(
+            vocab_size=config.vocab_size,
+            d_model=config.d_model,
+            n_heads=config.n_heads,
+            n_layers=config.n_layers,
+            d_ff=config.d_ff,
+            max_seq_len=config.max_seq_len
+        )
 
         self.post_init()
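A separate problem with the committed file: it now both imports TinyGPTConfig from configuration_tinygpt and defines a plain TinyGPTConfig class of its own. The local definition shadows the import, and because it does not subclass transformers.PretrainedConfig, the super().__init__(config) call above will reject it — PreTrainedModel requires a PretrainedConfig instance, and post_init() and save_pretrained() depend on one as well. A sketch of a compatible config, keeping the committed defaults (only one of the two definitions should survive):

from transformers import PretrainedConfig

class TinyGPTConfig(PretrainedConfig):
    model_type = "tinygpt"

    def __init__(self, vocab_size=30522, d_model=256, n_heads=4,
                 n_layers=4, d_ff=1024, max_seq_len=256, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        # PretrainedConfig stores the extra HF keys itself,
        # replacing the hand-rolled setattr loop in the commit.
        super().__init__(**kwargs)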
@@ -31,22 +72,12 @@
 
     def forward(self, input_ids, labels=None):
-        B, T = input_ids.shape
-        positions = torch.arange(T, device=input_ids.device).unsqueeze(0)
-
-        x = self.embed(input_ids) + self.pos_embed(positions)
-
-        for blk in self.blocks:
-            x = blk(x)
-
-        x = self.norm(x)
-        logits = self.lm_head(x)
+        logits = self.model(input_ids)
 
         loss = None
         if labels is not None:
-            shift_logits = logits[:, :-1, :].contiguous()
-            shift_labels = labels[:, 1:].contiguous()
-            loss_fct = CrossEntropyLoss()
-            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
-                            shift_labels.view(-1))
+            loss = nn.CrossEntropyLoss()(
+                logits.view(-1, logits.size(-1)),
+                labels.view(-1)
+            )
 
         return CausalLMOutput(
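One behavioral regression to flag: the deleted forward shifted logits and labels before computing the loss, while the replacement compares position t's logits against label t. That trains the model to echo the current token rather than predict the next one, unless the dataloader pre-shifts labels. Restoring the deleted shift inside the new loss block would read:

            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = nn.CrossEntropyLoss()(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1)
            )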
@@ -53,11 +84,3 @@
-            loss=loss,
-            logits=logits
-        )
-
-    @torch.no_grad()
-    def generate(self, input_ids, max_new_tokens=50):
-        for _ in range(max_new_tokens):
-            logits = self.forward(input_ids).logits
-            next_token = torch.argmax(logits[:, -1, :], dim=-1)
-            input_ids = torch.cat([input_ids, next_token[:, None]], dim=1)
-        return input_ids
+            logits=logits,
+            loss=loss
+        )
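Finally, the commit deletes the greedy generate() helper without a replacement. Assuming the truncated core is completed along the lines sketched earlier, a smoke test of the wrapper plus a greedy loop adapted from the deleted method could look like this (hypothetical usage, not part of the commit):

config = TinyGPTConfig()
model = TinyGPTForCausalLM(config)
model.eval()

input_ids = torch.tensor([[1, 2, 3]])
out = model(input_ids, labels=input_ids)
print(out.loss, out.logits.shape)   # scalar loss, torch.Size([1, 3, 30522])

# Greedy decoding, adapted from the deleted generate():
with torch.no_grad():
    for _ in range(10):
        logits = model(input_ids).logits
        next_token = torch.argmax(logits[:, -1, :], dim=-1)
        input_ids = torch.cat([input_ids, next_token[:, None]], dim=1)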