EzhirkoArulmozhi committed on
Commit
9d811a8
·
verified ·
1 Parent(s): 8a0f86f

Upload 3 files

Browse files

Created a Hugging Face application to generate Shakespeare-like text

Files changed (3) hide show
  1. app.py +88 -0
  2. requirement.txt +5 -0
  3. transformers.py +129 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import torch.nn.functional as F
4
+ import tiktoken
5
+ from huggingface_hub import hf_hub_download
6
+ from transformers import GPT, GPTConfig # Import your model class
7
+
8
# Load the model from the Hugging Face Hub
def load_model_from_huggingface(device):
    """Download the GPT checkpoint from the Hugging Face Hub and build the model.

    Args:
        device: torch device string ('cuda' or 'cpu') to place the model on.

    Returns:
        A ``GPT`` model on ``device``, in eval mode with gradients disabled.
    """
    model_id = "EzhirkoArulmozhi/DecoderTransformerModel"
    checkpoint_path = hf_hub_download(repo_id=model_id, filename="gpt_checkpoint.pth")

    # The checkpoint stores a full config object (not just tensors), so it
    # cannot be loaded with weights_only=True — which became the default in
    # torch 2.6.  Pass weights_only=False explicitly.
    checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=False)
    config = checkpoint['config']
    model = GPT(config)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()  # inference only — disables dropout etc.

    # Freeze all parameters; this app never trains.
    for param in model.parameters():
        param.requires_grad = False
    return model
25
+
26
def generate_text(model, device, prompt, max_length=100, num_samples=1,
                  top_k=50, context_size=1024):
    """Autoregressively generate Shakespeare-style continuations of ``prompt``.

    Args:
        model: GPT model; called as ``model(tokens)`` returning (logits, loss).
        device: device the model lives on ('cuda' or 'cpu').
        prompt: seed text to continue.
        max_length: number of tokens to generate per sample.
        num_samples: number of independent continuations to produce.
        top_k: sample only from the ``top_k`` most probable next tokens.
        context_size: hard cap on total sequence length (model block size).

    Returns:
        The generated samples joined by a ``---`` separator.
    """
    enc = tiktoken.get_encoding('gpt2')
    prompt_tokens = torch.tensor(enc.encode(prompt), dtype=torch.long)
    # One row per requested sample, each starting from the same prompt.
    tokens = prompt_tokens.unsqueeze(0).repeat(num_samples, 1).to(device)

    with torch.no_grad():
        for _ in range(max_length):
            if tokens.size(1) >= context_size:  # stop at the model's context limit
                break

            logits = model(tokens)[0]   # (B, T, vocab_size)
            logits = logits[:, -1, :]   # keep only the final position
            probs = F.softmax(logits, dim=-1)

            # Top-k sampling: restrict to the k most likely tokens, then
            # sample proportionally to their probabilities.
            topk_probs, topk_indices = torch.topk(probs, top_k, dim=-1)
            ix = torch.multinomial(topk_probs, 1)
            next_token = torch.gather(topk_indices, -1, ix)

            tokens = torch.cat((tokens, next_token), dim=1)

    # Decode every sample; generation simply runs for max_length tokens or
    # until the context limit — there is no end-of-text token handling.
    generated_texts = [enc.decode(row.tolist()) for row in tokens]
    return '\n\n---\n\n'.join(generated_texts)
58
+
59
# Select device and load the model once at startup.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = load_model_from_huggingface(device)
# Force model to stay in eval mode (redundant with model.eval(), kept for safety).
model.train(False)

def _gradio_generate(prompt, max_length=100, num_samples=1):
    """Gradio callback: generate text with the preloaded model.

    Sliders deliver floats, so cast them to int before generating.
    """
    return generate_text(model, device, prompt, int(max_length), int(num_samples))

# Create the Gradio interface.
# FIX: `model` and `device` were previously listed in `inputs`, but Gradio
# inputs must be UI components only (and the 3-value examples below matched a
# 3-input signature).  The closure above captures model/device instead.
iface = gr.Interface(
    fn=_gradio_generate,
    inputs=[
        gr.Textbox(label="Prompt", value="We are accounted poor citizens, the"),
        gr.Slider(minimum=10, maximum=200, value=100, step=1, label="Max Length"),
        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of Samples"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Shakespeare-style Text Generator",
    description="Enter a prompt to generate Shakespeare-style text continuation",
    examples=[
        ["O Romeo, Romeo, wherefore art thou", 100, 1],
        ["To be, or not to be, that is", 60, 2],
        ["Friends, Romans, countrymen, lend me", 50, 3],
        ["All the world's a stage, and all the", 100, 1],
        ["Now is the winter of our discontent", 100, 1],
        ["If music be the food of love,", 100, 1],
    ]
)

if __name__ == "__main__":
    iface.launch()
requirement.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ tiktoken
4
+ transformers
5
+ huggingface_hub
transformers.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.nn import functional as F
5
+
6
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with a fused QKV projection."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # One linear layer produces query, key and value for every head at once.
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        # Projection back into the residual stream.
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        # Lower-triangular mask, kept for checkpoint compatibility; the fused
        # attention call below applies its own causal masking internally.
        mask = torch.tril(torch.ones(config.block_size, config.block_size))
        self.register_buffer("bias", mask.view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        batch, seq_len, width = x.size()  # width == n_embd == n_head * head_dim
        head_dim = width // self.n_head

        # Project once, then split the channel dimension into q, k, v.
        projected = self.c_attn(x)
        q, k, v = projected.split(self.n_embd, dim=2)

        # Reshape each to (batch, heads, seq, head_dim).
        def split_heads(t):
            return t.view(batch, seq_len, self.n_head, head_dim).transpose(1, 2)

        q = split_heads(q)
        k = split_heads(k)
        v = split_heads(v)

        # Fused causal attention kernel (flash attention when available).
        out = F.scaled_dot_product_attention(q, k, v, is_causal=True)

        # Merge the heads back side by side and project to the output.
        out = out.transpose(1, 2).contiguous().view(batch, seq_len, width)
        return self.c_proj(out)
43
+
44
class MLP(nn.Module):
    """Position-wise feed-forward block: expand 4x, GELU, project back."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')  # GPT-2 uses the tanh GELU approximation
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        # FIX: was `NANOGPT_SCALE_INIT`, but GPT._init_weights checks for the
        # attribute name 'NANGPT_SCALE_INIT' (the spelling CausalSelfAttention
        # uses), so with the typo this residual projection never received the
        # scaled-down initialization.
        self.c_proj.NANGPT_SCALE_INIT = 1

    def forward(self, x):
        """Apply the MLP over the last dimension of x (..., n_embd)."""
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x
58
+
59
class Block(nn.Module):
    """One transformer layer: pre-norm attention followed by a pre-norm MLP."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # Residual connection around each sub-layer (GPT-2 pre-norm style).
        attended = self.attn(self.ln_1(x))
        x = x + attended
        return x + self.mlp(self.ln_2(x))
72
+
73
@dataclass
class GPTConfig:
    """Hyper-parameters for the GPT model architecture."""
    block_size: int = 1024  # maximum sequence (context) length
    vocab_size: int = 50304  # GPT-2's 50257 tokens padded up to a multiple of 128
    n_layer: int = 12  # number of transformer blocks
    n_head: int = 12  # attention heads per block
    n_embd: int = 768  # embedding dimension
80
+
81
class GPT(nn.Module):
    """GPT-2 style decoder-only transformer language model."""

    def __init__(self, config):
        super().__init__()
        self.config = config

        # Core transformer modules, named to mirror the GPT-2 checkpoint layout.
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # learned position embeddings
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),  # final layer norm
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # weight sharing: input embedding and output head use the same tensor
        self.transformer.wte.weight = self.lm_head.weight

        # weight initialization — applied after the tying above; both modules
        # holding the shared tensor initialize it with std 0.02.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Normal(0, 0.02) init; Linear layers tagged with NANGPT_SCALE_INIT
        get std scaled by (2 * n_layer) ** -0.5."""
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANGPT_SCALE_INIT'):
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean = 0.0, std = std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std = 0.02)

    def forward(self, idx, targets=None):
        """Run the model.

        Args:
            idx: LongTensor of token ids, shape (B, T) with T <= block_size.
            targets: optional LongTensor of the same shape holding next-token
                labels for the cross-entropy loss.

        Returns:
            (logits, loss): logits of shape (B, T, vocab_size); loss is a
            scalar cross-entropy tensor when ``targets`` is given, else None.
        """
        # idx is of shape (B, T)
        B, T = idx.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
        # forward the token and position embeddings
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
        x = tok_emb + pos_emb  # positions broadcast over the batch dimension
        # forward the blocks of the transformer
        for block in self.transformer.h:
            x = block(x)
        # forward the final layernorm and the classifier
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x) # (B, T, vocab_size)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss