Rahul2020 committed on
Commit
720fea4
Β·
verified Β·
1 Parent(s): 95f1345

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +314 -0
  2. requirements.txt +3 -0
  3. shakespeare_gpt_fp16.pt +3 -0
app.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Shakespeare Text Generator - Hugging Face Gradio App
3
+ =====================================================
4
+ A GPT model trained on Shakespeare's works to generate text in Shakespearean style.
5
+ """
6
+
7
+ import gradio as gr
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import functional as F
11
+ import math
12
+ from dataclasses import dataclass
13
+ import tiktoken
14
+
15
+ # ============================================================================
16
+ # MODEL ARCHITECTURE (Same as training)
17
+ # ============================================================================
18
+
19
class CausalSelfAttention(nn.Module):
    """Multi-head masked self-attention (GPT-2 style).

    A single fused linear layer produces query/key/value; a lower-triangular
    mask restricts each position to attend only to itself and earlier
    positions; the concatenated heads are projected back to the model width.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # Fused q/k/v projection and the output projection.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        # Flag read by GPT._init_weights: scale this layer's init by depth.
        self.c_proj.NANGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        # Causal mask shaped (1, 1, T, T) so it broadcasts over batch & heads.
        mask = torch.tril(torch.ones(config.block_size, config.block_size))
        self.register_buffer("bias", mask.view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        batch, seq_len, width = x.size()
        head_dim = width // self.n_head
        # Split the fused projection into per-head q, k, v tensors.
        query, key, value = self.c_attn(x).split(self.n_embd, dim=2)
        query = query.view(batch, seq_len, self.n_head, head_dim).transpose(1, 2)
        key = key.view(batch, seq_len, self.n_head, head_dim).transpose(1, 2)
        value = value.view(batch, seq_len, self.n_head, head_dim).transpose(1, 2)
        # Scaled dot-product scores; future positions are masked to -inf so
        # softmax assigns them zero weight.
        scores = (query @ key.transpose(-2, -1)) * (1.0 / math.sqrt(key.size(-1)))
        scores = scores.masked_fill(self.bias[:, :, :seq_len, :seq_len] == 0, float('-inf'))
        weights = F.softmax(scores, dim=-1)
        out = weights @ value
        out = out.transpose(1, 2).contiguous().view(batch, seq_len, width)
        return self.c_proj(out)
44
+
45
class MLP(nn.Module):
    """GPT-2 position-wise feed-forward block: linear -> GELU -> linear."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)  # 4x expansion
        self.gelu = nn.GELU(approximate='tanh')  # GPT-2 uses the tanh approximation
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        # Fix: this was spelled NANOGPT_SCALE_INIT, but GPT._init_weights
        # checks for NANGPT_SCALE_INIT (the spelling CausalSelfAttention uses),
        # so the 1/sqrt(2*n_layer) residual init was never applied here.
        # Use the spelling the init hook actually checks.
        self.c_proj.NANGPT_SCALE_INIT = 1

    def forward(self, x):
        """Apply the feed-forward transformation independently per position."""
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x
58
+
59
class Block(nn.Module):
    """One transformer layer: pre-norm attention and MLP, each with a residual."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # Pre-LayerNorm residual wiring, as in GPT-2.
        x = self.attn(self.ln_1(x)) + x
        x = self.mlp(self.ln_2(x)) + x
        return x
71
+
72
@dataclass
class GPTConfig:
    """Model hyperparameters (defaults match the GPT-2 124M architecture)."""
    block_size: int = 1024   # maximum context length in tokens
    vocab_size: int = 50257  # GPT-2 BPE vocabulary size
    n_layer: int = 12        # number of transformer blocks
    n_head: int = 12         # attention heads per block
    n_embd: int = 768        # embedding / hidden width
79
+
80
class GPT(nn.Module):
    """GPT-2 style decoder-only language model.

    Token + learned positional embeddings, a stack of transformer blocks,
    a final LayerNorm, and a weight-tied language-model head.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # position embeddings
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying: input embedding and output head share one parameter.
        self.transformer.wte.weight = self.lm_head.weight
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """GPT-2 init: normal(0, 0.02); residual projections scaled by
        1/sqrt(2 * n_layer)."""
        if isinstance(module, nn.Linear):
            std = 0.02
            # Fix: accept both spellings of the scale flag. CausalSelfAttention
            # sets NANGPT_SCALE_INIT while MLP sets NANOGPT_SCALE_INIT; only
            # the former was checked before, so MLP projections silently
            # missed the residual-depth scaling.
            if hasattr(module, 'NANGPT_SCALE_INIT') or hasattr(module, 'NANOGPT_SCALE_INIT'):
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        """Return (logits, loss).

        Args:
            idx: (B, T) int64 token ids, T <= config.block_size.
            targets: optional (B, T) int64 ids; when given, loss is the
                cross-entropy of next-token prediction, else loss is None.
        """
        B, T = idx.size()
        assert T <= self.config.block_size
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device)
        pos_emb = self.transformer.wpe(pos)  # (T, n_embd), broadcast over batch
        tok_emb = self.transformer.wte(idx)  # (B, T, n_embd)
        x = tok_emb + pos_emb
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
120
+
121
# ============================================================================
# LOAD MODEL
# ============================================================================

print("Loading model...")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

model = GPT(GPTConfig())
# Load your trained checkpoint here.
# weights_only=True restricts unpickling to tensors/primitives instead of
# executing arbitrary pickle code — the checkpoint is a plain fp16 state dict,
# so nothing more is needed.
checkpoint = torch.load('shakespeare_gpt_fp16.pt', map_location=device,
                        weights_only=True)
# Upcast the fp16 weights to fp32 for stable inference (esp. on CPU).
model.load_state_dict({k: v.float() for k, v in checkpoint.items()})
model.to(device)
model.eval()

# GPT-2 BPE tokenizer — must match the vocab the model was trained with.
enc = tiktoken.get_encoding('gpt2')
137
+
138
# ============================================================================
# GENERATION FUNCTION
# ============================================================================

def generate_shakespeare(prompt, max_length=100, temperature=0.8, top_k=50, num_samples=1):
    """
    Generate Shakespeare-style text from a prompt.

    Args:
        prompt: Starting text
        max_length: Maximum number of tokens to generate
        temperature: Sampling temperature (higher = more random)
        top_k: Number of top tokens to sample from
        num_samples: Number of different samples to generate

    Returns:
        The generated text (samples separated by a divider when
        num_samples > 1), or an error message string.
    """
    if not prompt.strip():
        return "Please enter a prompt to generate text."

    # Gradio sliders deliver floats; range()/topk() require ints.
    max_length = int(max_length)
    top_k = int(top_k)
    num_samples = int(num_samples)

    try:
        # Encode the prompt
        tokens = enc.encode(prompt)
        if len(tokens) == 0:
            return "Invalid prompt. Please try again."

        block_size = model.config.block_size
        outputs = []

        for _ in range(num_samples):
            x = torch.tensor(tokens, dtype=torch.long, device=device).unsqueeze(0)

            with torch.no_grad():
                for _ in range(max_length):
                    # Crop the context to the model's block size so the
                    # forward pass never sees more positions than it supports
                    # (previously an over-long prompt tripped the model's
                    # internal assert).
                    x_cond = x if x.size(1) <= block_size else x[:, -block_size:]
                    logits = model(x_cond)[0]
                    logits = logits[:, -1, :] / temperature

                    # Top-k sampling
                    probs = F.softmax(logits, dim=-1)
                    topk_probs, topk_indices = torch.topk(probs, min(top_k, probs.size(-1)), dim=-1)
                    ix = torch.multinomial(topk_probs, 1)
                    xcol = torch.gather(topk_indices, -1, ix)
                    x = torch.cat((x, xcol), dim=1)

                    # Stop once the sequence fills the context window.
                    if x.size(1) >= block_size:
                        break

            # Decode the full sequence (prompt + continuation).
            outputs.append(enc.decode(x[0].tolist()))

        if num_samples == 1:
            return outputs[0]
        # Fix: the divider must be part of the join separator. The original
        # expression ("\n\n" + "="*60 + "\n\n".join(...)) prepended the
        # divider once and joined samples with only blank lines.
        separator = "\n\n" + "=" * 60 + "\n\n"
        return separator.join(outputs)

    except Exception as e:
        return f"Error generating text: {str(e)}"
197
+
198
# ============================================================================
# GRADIO INTERFACE
# ============================================================================

# Create the interface
with gr.Blocks() as demo:
    # Page header and model summary.
    gr.Markdown(
        """
        # 🎭 Shakespeare Text Generator

        Generate text in the style of William Shakespeare using a GPT model trained on his complete works.
        Enter a prompt and watch the Bard's AI apprentice continue the story!

        **Model Details**: GPT-2 124M architecture trained on Shakespeare's plays and sonnets (Loss: 0.095)
        """
    )

    with gr.Row():
        # Left column: prompt input plus sampling controls.
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="πŸ“ Enter Your Prompt",
                placeholder="To be or not to be...",
                lines=4,
                value="To be or not to be"
            )

            # Sampling knobs, collapsed by default.
            with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                max_length_slider = gr.Slider(
                    minimum=20,
                    maximum=300,
                    value=100,
                    step=10,
                    label="Max Length (tokens)",
                    info="Maximum number of tokens to generate"
                )

                temperature_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.5,
                    value=0.8,
                    step=0.1,
                    label="Temperature",
                    info="Higher = more creative, Lower = more focused"
                )

                top_k_slider = gr.Slider(
                    minimum=10,
                    maximum=100,
                    value=50,
                    step=10,
                    label="Top-K",
                    info="Number of top tokens to sample from"
                )

                num_samples_slider = gr.Slider(
                    minimum=1,
                    maximum=3,
                    value=1,
                    step=1,
                    label="Number of Samples",
                    info="Generate multiple variations"
                )

            generate_btn = gr.Button("🎨 Generate", variant="primary", size="lg")

        # Right column: generated text output.
        with gr.Column(scale=1):
            output_text = gr.Textbox(
                label="πŸ“œ Generated Text",
                lines=15
            )

    # Examples
    # Each row supplies [prompt, max_length, temperature, top_k, num_samples].
    gr.Markdown("### πŸ“š Try These Examples:")
    gr.Examples(
        examples=[
            ["To be or not to be", 100, 0.8, 50, 1],
            ["What's in a name?", 120, 0.7, 40, 1],
            ["All the world's a stage", 150, 0.9, 50, 1],
            ["Romeo, Romeo, wherefore art thou", 100, 0.8, 50, 1],
            ["Friends, Romans, countrymen", 130, 0.75, 45, 1],
            ["Now is the winter of our discontent", 110, 0.85, 50, 1],
        ],
        inputs=[prompt_input, max_length_slider, temperature_slider, top_k_slider, num_samples_slider],
        outputs=output_text,
        fn=generate_shakespeare,
        cache_examples=False
    )

    # Connect the button
    generate_btn.click(
        fn=generate_shakespeare,
        inputs=[prompt_input, max_length_slider, temperature_slider, top_k_slider, num_samples_slider],
        outputs=output_text
    )

    # Usage tips shown below the interface.
    gr.Markdown(
        """
        ---
        ### πŸ’‘ Tips for Best Results:
        - Start with famous Shakespeare quotes for coherent continuations
        - Use **lower temperature** (0.5-0.7) for more focused, coherent text
        - Use **higher temperature** (0.9-1.2) for more creative, diverse outputs
        - Adjust **Top-K** to control vocabulary diversity
        - Try generating multiple samples to see different variations

        ### ⚠️ Note:
        This model was trained on Shakespeare's works and will generate text in Early Modern English style.
        Results may vary based on the prompt and parameters.
        """
    )

# ============================================================================
# LAUNCH
# ============================================================================

# share=False: serve locally only (Spaces handles public hosting itself).
if __name__ == "__main__":
    demo.launch(share=False)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch>=2.0.0
2
+ gradio>=4.0.0
3
+ tiktoken>=0.5.0
shakespeare_gpt_fp16.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9b79c5d5951dc9234779974e12132032f76cecc0a2f1d99a33a8cf2c803506f
3
+ size 351291857