Rajendro commited on
Commit
5dfef55
·
verified ·
1 Parent(s): 6824eef

Upload 5 files

Browse files
Files changed (5) hide show
  1. README.md +5 -5
  2. app.py +85 -0
  3. gitattributes +35 -0
  4. requirements.txt +5 -0
  5. transformers.py +129 -0
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: StyleDecoderTransformer
3
- emoji:
4
- colorFrom: purple
5
- colorTo: pink
6
  sdk: gradio
7
  sdk_version: 5.12.0
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: DecoderTransformerModel
3
+ emoji: 🌖
4
+ colorFrom: red
5
+ colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 5.12.0
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Decoder based Sequence to sequence Transformer model
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
import torch.nn.functional as F
import tiktoken
from huggingface_hub import hf_hub_download
# NOTE(review): this resolves to the *local* transformers.py in this repo,
# which shadows the PyPI `transformers` package listed in requirements.txt —
# confirm the shadowing is intentional.
from transformers import GPT, GPTConfig # Import your model class

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
9
+
10
# Load the model from Hugging Face Hub
def load_model_from_huggingface():
    """Download the GPT checkpoint from the Hub and return an eval-ready model.

    Returns the model moved to the module-level `device`, switched to eval
    mode, with all gradients disabled (this app only does inference).
    """
    # Replace with your Hugging Face model ID (username/model-name)
    repo = "EzhirkoArulmozhi/DecoderTransformerModel"
    ckpt_path = hf_hub_download(repo_id=repo, filename="gpt_checkpoint.pth")

    # The checkpoint bundles both the config object and the weights.
    state = torch.load(ckpt_path, map_location=device)
    net = GPT(state['config'])
    net.load_state_dict(state['model_state_dict'])
    net.to(device)
    net.eval()  # evaluation mode: disables dropout-style behavior

    # Freeze every parameter — no gradient bookkeeping needed at inference.
    net.requires_grad_(False)
    return net
27
+
28
# Loaded once at module import; generate_text below reads this global.
model = load_model_from_huggingface()
# Force model to stay in eval mode
model.train(False)
31
+
32
def generate_text(prompt, max_length=25, num_samples=1):
    """Autoregressively extend `prompt` with top-50 sampling.

    Args:
        prompt: seed text, encoded with the GPT-2 BPE tokenizer.
        max_length: number of new tokens to sample per completion.
        num_samples: how many independent completions to generate.

    Returns:
        The decoded completions joined by a '---' separator.
    """
    enc = tiktoken.get_encoding('gpt2')
    seq = torch.tensor(enc.encode(prompt), dtype=torch.long, device=device)
    # One row per requested sample, all starting from the same prompt.
    seq = seq.unsqueeze(0).repeat(num_samples, 1)

    with torch.no_grad():
        for _ in range(max_length):
            if seq.size(1) >= 1024:  # GPT context length
                break

            logits, _ = model(seq)
            # Only the final position predicts the next token.
            probs = F.softmax(logits[:, -1, :], dim=-1)

            # Top-k sampling (k=50): sample within the 50 likeliest tokens.
            top_probs, top_idx = torch.topk(probs, 50, dim=-1)
            pick = torch.multinomial(top_probs, 1)
            next_tok = torch.gather(top_idx, -1, pick)

            seq = torch.cat((seq, next_tok), dim=1)

    # No special-token stopping — generate for the requested length or
    # until the context limit, then decode every row.
    return '\n\n---\n\n'.join(enc.decode(row.tolist()) for row in seq)
64
+
65
# Create Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", value="How are you doing Raj? Have a wonderful day."),
        gr.Radio(choices=[25, 50, 75, 100, 125], value=100, label="Max Length", type="value"),
        gr.Radio(choices=[1, 2, 3, 4], value=1, label="Number of Samples", type="value"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Dialog Generator",
    # Fixed user-facing typo: "diaglog" -> "dialog".
    description="Enter a prompt to generate a dialog.",
    examples=[
        ["Lets fix the real issues in the world", 125, 1],
        ["Lets do something about it", 100, 2],
        # Fixed example typo: "ahve" -> "have".
        ["I have not seen you today", 75, 3],
        ["I am a bad person", 50, 4],
    ]
)

if __name__ == "__main__":
    iface.launch()
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ tiktoken
4
+ transformers
5
+ huggingface_hub
transformers.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.nn import functional as F
5
+
6
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with a fused QKV projection."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # A single linear layer produces Q, K and V for every head at once.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        # Projects the concatenated head outputs back to model width.
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        # Lower-triangular causal mask. forward() never reads it (flash
        # attention handles causality via is_causal=True), but the buffer is
        # kept so existing checkpoints containing it still load cleanly.
        mask = torch.tril(torch.ones(config.block_size, config.block_size))
        self.register_buffer("bias", mask.view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding width
        # Fused projection, then split into Q, K, V and reshape each to
        # (B, n_head, T, head_size) so heads act as a batch dimension.
        # e.g. in GPT-2 (124M): n_head=12, head_size=64, C = 12*64 = 768.
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        hs = C // self.n_head
        q = q.view(B, T, self.n_head, hs).transpose(1, 2)
        k = k.view(B, T, self.n_head, hs).transpose(1, 2)
        v = v.view(B, T, self.n_head, hs).transpose(1, 2)

        # Flash attention with an implicit causal mask; replaces the manual
        # matmul / mask / softmax / matmul sequence.
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)

        # Re-assemble all head outputs side by side, then project.
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)
43
+
44
class MLP(nn.Module):
    """Position-wise feed-forward block: expand 4x, tanh-GELU, project back."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        # Bug fix: this was tagged NANOGPT_SCALE_INIT, but GPT._init_weights
        # checks for the attribute NANGPT_SCALE_INIT (no second 'O'), so the
        # residual projection silently never received the 1/sqrt(2*n_layer)
        # depth-scaled initialization. Use the spelling the initializer reads.
        self.c_proj.NANGPT_SCALE_INIT = 1

    def forward(self, x):
        # (B, T, n_embd) -> (B, T, 4*n_embd) -> GELU -> (B, T, n_embd)
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x
58
+
59
class Block(nn.Module):
    """One transformer layer: pre-norm attention and pre-norm MLP, each
    wrapped in a residual connection."""

    def __init__(self, config):
        super().__init__()
        # Attribute names (ln_1, attn, ln_2, mlp) are load-bearing: they are
        # the state_dict keys existing checkpoints were saved under.
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # Pre-norm residual layout: x + f(norm(x)) for each sublayer.
        x = x + self.attn(self.ln_1(x))
        return x + self.mlp(self.ln_2(x))
72
+
73
@dataclass
class GPTConfig:
    """Hyperparameters for the GPT model (defaults match GPT-2 124M)."""
    block_size: int = 1024 # max sequence length
    # GPT-2's actual vocabulary is 50,257 tokens (50,000 BPE merges +
    # 256 byte tokens + 1 <|endoftext|>); 50,304 is that figure padded up
    # to a multiple of 128 — presumably for GPU kernel efficiency, TODO confirm.
    vocab_size: int = 50304
    n_layer: int = 12 # number of transformer blocks
    n_head: int = 12 # attention heads per block
    n_embd: int = 768 # embedding dimension
80
+
81
class GPT(nn.Module):
    """Decoder-only, GPT-2-style language model built from `Block` layers."""

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # learned position embeddings
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),  # final layer norm
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # weight sharing: the input embedding matrix doubles as the output
        # projection (GPT-2-style weight tying).
        self.transformer.wte.weight = self.lm_head.weight

        # weight initialization — run after tying; both the Linear and the
        # Embedding branches below use std 0.02 (lm_head has no scale flag),
        # so the shared matrix ends up N(0, 0.02) regardless of visit order.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Normal(0, 0.02) init; Linears flagged with NANGPT_SCALE_INIT get
        std scaled by 1/sqrt(2*n_layer)."""
        # NOTE(review): verify every residual projection sets this exact
        # attribute spelling — a mismatched tag silently skips the scaling.
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANGPT_SCALE_INIT'):
                # Shrink residual-stream projections with depth so the
                # variance of the accumulated residual sum stays controlled.
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean = 0.0, std = std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)

    def forward(self, idx, targets=None):
        """Forward pass.

        Args:
            idx: (B, T) int64 token ids with T <= config.block_size.
            targets: optional (B, T) token ids for next-token prediction loss.

        Returns:
            (logits, loss): logits is (B, T, vocab_size); loss is scalar
            cross-entropy when `targets` is given, else None.
        """
        # idx is of shape (B, T)
        B, T = idx.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
        # forward the token and position embeddings
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
        x = tok_emb + pos_emb  # (T, n_embd) broadcasts over the batch dim
        # forward the blocks of the transformer
        for block in self.transformer.h:
            x = block(x)
        # forward the final layernorm and the classifier
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x) # (B, T, vocab_size)
        loss = None
        if targets is not None:
            # Flatten (B, T) positions into one batch for cross-entropy.
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss