Madras1 committed
Commit 384cd73 · verified · 1 Parent(s): 67d6ead

Critical Fix: Bias Loading and Forward Kwargs (Final Version)

config.json CHANGED
@@ -13,7 +13,6 @@
   "intermediate_size": 1024,
   "max_position_embeddings": 1024,
   "rms_norm_eps": 1e-06,
-  "initializer_range": 0.02,
   "model_type": "tinygpt",
   "torch_dtype": "float32"
 }
configuration_tinygpt.py CHANGED
@@ -1,19 +1,7 @@
 from transformers import PretrainedConfig
-
 class TinyGPTConfig(PretrainedConfig):
     model_type = "tinygpt"
-    def __init__(
-        self,
-        vocab_size=32000,
-        hidden_size=384,
-        num_hidden_layers=12,
-        num_attention_heads=8,
-        intermediate_size=1536,
-        max_position_embeddings=1024,
-        rms_norm_eps=1e-6,
-        initializer_range=0.02,
-        **kwargs,
-    ):
+    def __init__(self, vocab_size=32000, hidden_size=384, num_hidden_layers=12, num_attention_heads=8, intermediate_size=1024, max_position_embeddings=1024, rms_norm_eps=1e-6, initializer_range=0.02, **kwargs):
         self.vocab_size = vocab_size
         self.hidden_size = hidden_size
         self.num_hidden_layers = num_hidden_layers
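
A quick way to see the effect of this change (a minimal sketch, assuming configuration_tinygpt.py is importable from the working directory and that the remaining attributes are still assigned further down in __init__, below the diff context shown):

from configuration_tinygpt import TinyGPTConfig

cfg = TinyGPTConfig()
print(cfg.intermediate_size)   # 1024 -- the default now matches config.json (it used to be 1536)
print(cfg.initializer_range)   # 0.02 -- assumed still set in __init__; config.json no longer stores it, so the keyword default applies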
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6e30978bab63e4b3d523cb089eeace6028d93af39b418f122899ebdb8275bd91
-size 165985968
+oid sha256:84954ffd48c829d1a6c3a95bc668df9206c2e40562f152d92de821f377afeac0
+size 166134520
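
A rough consistency check on the new checkpoint size (my own back-of-the-envelope arithmetic, not part of the commit): the extra bytes are roughly what the newly serialized bias vectors cost in float32, with the remainder plausibly going to the larger safetensors header.

hidden, inter, layers = 384, 1024, 12          # from config.json
attn_bias = 4 * hidden                         # q_proj, k_proj, v_proj, out_proj biases
mlp_bias = inter + hidden                      # fc_in, fc_out biases
bias_params = layers * (attn_bias + mlp_bias)  # 35,328 parameters
print(bias_params * 4)                         # 141,312 bytes of float32
print(166134520 - 165985968)                   # 148,552 bytes actually added; ~7 KB left over for header metadata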
modeling_tinygpt.py CHANGED
@@ -2,13 +2,13 @@ import torch
 import torch.nn as nn
 from transformers import PreTrainedModel
 from .configuration_tinygpt import TinyGPTConfig
+from transformers.modeling_outputs import CausalLMOutputWithPast  # Important for the correct return type
 
 class RMSNorm(nn.Module):
     def __init__(self, dim, eps=1e-6):
         super().__init__()
         self.eps = eps
         self.weight = nn.Parameter(torch.ones(dim))
-
     def forward(self, x):
         var = torch.mean(x ** 2, dim=-1, keepdim=True)
         return x * torch.rsqrt(var + self.eps) * self.weight
@@ -16,10 +16,9 @@ class RMSNorm(nn.Module):
 class MLP(nn.Module):
     def __init__(self, config):
         super().__init__()
-        self.fc_in = nn.Linear(config.hidden_size, config.intermediate_size)
+        self.fc_in = nn.Linear(config.hidden_size, config.intermediate_size, bias=True)
         self.act = nn.GELU()
-        self.fc_out = nn.Linear(config.intermediate_size, config.hidden_size)
-
+        self.fc_out = nn.Linear(config.intermediate_size, config.hidden_size, bias=True)
     def forward(self, x):
         return self.fc_out(self.act(self.fc_in(x)))
 
@@ -29,22 +28,19 @@ class Attention(nn.Module):
         self.n_heads = config.num_attention_heads
         self.head_dim = config.hidden_size // config.num_attention_heads
         self.scale = self.head_dim ** -0.5
-
-        self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
-        self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
-        self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
-        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
-
+        self.q_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
+        self.k_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
+        self.v_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
+        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
     def forward(self, x, mask=None):
         B, T, C = x.shape
         q = self.q_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
         k = self.k_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
         v = self.v_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
-
         att = (q @ k.transpose(-2, -1)) * self.scale
         if mask is not None:
+            if mask.dim() == 2: mask = mask.unsqueeze(0).unsqueeze(0)
             att = att.masked_fill(mask == 0, float('-inf'))
-
         att = torch.softmax(att, dim=-1)
         out = (att @ v).transpose(1, 2).contiguous().view(B, T, C)
         return self.out_proj(out)
@@ -56,7 +52,6 @@ class Block(nn.Module):
         self.attn = Attention(config)
         self.norm_2 = RMSNorm(config.hidden_size, config.rms_norm_eps)
         self.mlp = MLP(config)
-
     def forward(self, x, mask=None):
         x = x + self.attn(self.norm_1(x), mask)
         x = x + self.mlp(self.norm_2(x))
@@ -79,8 +74,7 @@ class TinyGPTModel(TinyGPTPreTrainedModel):
         self.wpe = nn.Embedding(config.max_position_embeddings, config.hidden_size)
         self.h = nn.ModuleList([Block(config) for _ in range(config.num_hidden_layers)])
         self.ln_f = RMSNorm(config.hidden_size, config.rms_norm_eps)
-
-    def forward(self, input_ids):
+    def forward(self, input_ids, attention_mask=None):
         B, T = input_ids.shape
         pos = torch.arange(0, T, dtype=torch.long, device=input_ids.device)
         x = self.wte(input_ids) + self.wpe(pos)
@@ -94,14 +88,24 @@ class TinyGPTForCausalLM(TinyGPTPreTrainedModel):
         super().__init__(config)
         self.transformer = TinyGPTModel(config)
         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-    def forward(self, input_ids, labels=None):
-        hidden = self.transformer(input_ids)
+
+    # THIS WAS THE BUG! Added **kwargs to swallow return_dict, output_attentions, etc.
+    def forward(self, input_ids, attention_mask=None, labels=None, **kwargs):
+        hidden = self.transformer(input_ids, attention_mask)
         logits = self.lm_head(hidden)
+
         loss = None
         if labels is not None:
             shift_logits = logits[..., :-1, :].contiguous()
             shift_labels = labels[..., 1:].contiguous()
             loss_fct = nn.CrossEntropyLoss()
             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
-        return {"loss": loss, "logits": logits}
+
+        # Return the standard HF output object to avoid compatibility errors
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+        )
+
+    def prepare_inputs_for_generation(self, input_ids, **kwargs):
+        return {"input_ids": input_ids}
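
A minimal smoke test of the two fixes (a sketch, not part of the repo: the repo id below is a placeholder and it assumes config.json registers the custom classes via auto_map): Trainer- and pipeline-style callers pass return_dict, output_attentions and a 2-D attention_mask, which the previous forward signature rejected with a TypeError.

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Madras1/tinygpt", trust_remote_code=True)  # placeholder repo id

input_ids = torch.randint(0, model.config.vocab_size, (1, 16))
attention_mask = torch.ones_like(input_ids)  # 2-D mask, unsqueezed to a broadcastable shape inside Attention

# Extra kwargs such as return_dict are now swallowed by **kwargs instead of raising,
# and the output is a CausalLMOutputWithPast rather than a plain dict.
out = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids, return_dict=True)
print(out.loss, out.logits.shape)

# With prepare_inputs_for_generation defined, greedy decoding should also work on transformers
# versions where PreTrainedModel still provides GenerationMixin:
# print(model.generate(input_ids, max_new_tokens=8, do_sample=False))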