SoggyBurritos committed
Commit da03cc7 · verified · 1 Parent(s): 7fb5a49

Upload 4 files
app/main.py ADDED
@@ -0,0 +1,85 @@
+ # /app/main.py
+ import torch, os
+ from importlib.metadata import version
+ import streamlit as st
+ import tiktoken
+ from pathlib import Path
+ from scripts import MultiHeadAttention, LayerNorm, GELU, FeedForward, TransformerBlock, GPTModel, build_old_policy
+
+ # library = ["numpy", "torch", "tensorflow", "streamlit", "pandas", "tiktoken"]
+ # for lib in library:
+ #     st.write(f"{lib} version: {version(lib)}")
+
+ # Set basic page configuration (optional, but useful for wider layouts)
+ st.set_page_config(
+     page_title="Spam or Ham",
+     page_icon="🤖",
+     layout="centered",  # or "wide"
+     initial_sidebar_state="collapsed"
+ )
+
+ # BUILD THE CLASSIFIER POLICY MODEL
+ @st.cache_resource
+ def load_model_and_tokenizer():
+     # --- CONFIGURATION ---
+     BASE_CONFIG = {
+         "vocab_size": 50257,     # Vocabulary size
+         "context_length": 1024,  # Context length
+         "drop_rate": 0.1,        # Dropout rate
+         "qkv_bias": True         # Query-key-value bias
+     }
+     policy = build_old_policy(base_config=BASE_CONFIG, chosen_model="gpt2-small (124M)", num_classes=2)
+
+     model_parameters_path = Path("./app/models/Spam-Classifier-GPT2-Model.pt")  # Note: the Docker image starts in a different working directory (see Dockerfile)
+
+     if not model_parameters_path.exists():
+         st.error(f"Model parameter file not found at: {model_parameters_path}. Please ensure it's in the correct location.")
+         st.stop()  # Stop the script
+
+
+     policy.load_state_dict(torch.load(f=model_parameters_path, weights_only=True, map_location='cpu'))
+     policy.to('cpu')
+     tokenizer = tiktoken.get_encoding("gpt2")
+
+     return policy.eval(), tokenizer
+
+ st.title("Spam Classifier Agent!")
+
+ # https://docs.streamlit.io/develop/api-reference/widgets/st.text_area
+ text_block = st.text_area(label="Enter your text to classify if it is SPAM or NOT SPAM", placeholder="ConGratulations!!!1 You won $1.000. Click the link beelow to claime you're Prize.!")
+
+
+ # --- Add a button to trigger analysis ---
+ if st.button("Analyze Text"):
+     if text_block:  # Only run if there is input; maybe introduce a 'submit' button later
+
+         policy, tokenizer = load_model_and_tokenizer()
+
+         # Tokenize the input string and restrict it to the model's context length
+         tokenized_input = tokenizer.encode(text_block)[-policy.pos_emb.num_embeddings:]
+
+         batched_input = torch.tensor(data=tokenized_input).unsqueeze(0)  # Turn the tokenized input into a tensor and add a batch dimension
+         with torch.no_grad():
+             logits = policy(batched_input)[:, -1, :]  # Run the input through the model and keep the logits of the last timestep
+
+         prediction_index = torch.argmax(input=logits, dim=-1).item()  # Get the model's predicted class index
+
+         prediction_label = "SPAM" if prediction_index == 1 else "NOT SPAM"  # Map the prediction index to a label
+
+         # --- Streamlit Output ---
+         st.subheader("Classification Result:")
+         st.write("---")  # Separator
+
+         st.markdown("**Classification:**")
+         if prediction_label == "SPAM":
+             st.error(f"Prediction: {prediction_label} 🚨")  # Red box for spam
+         else:
+             st.success(f"Prediction: {prediction_label} ✅")  # Green box for not spam
+
+         # Optional: Show probabilities
+         softmax_probs = torch.nn.functional.softmax(logits, dim=-1)
+         st.info(f"Probabilities: SPAM={softmax_probs[0, 1]:.4f}, NOT SPAM={softmax_probs[0, 0]:.4f}")
+
+         st.write("---")  # Another separator
+     else:
+         st.warning("Please enter some text in the text area before clicking 'Analyze Text'.")
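
For quick local testing outside the Streamlit UI, the same load-and-classify logic can be exercised as a plain script. Below is a minimal sketch (hypothetical, not part of this commit): it assumes the repository root is the working directory so that ./app/models/ and the scripts package resolve, and that the Git LFS weights have already been pulled.

# smoke_test.py — hypothetical helper, not included in this commit
import sys
import torch
import tiktoken

sys.path.append("./app")  # make `scripts` importable, as Streamlit's script directory does for app/main.py
from scripts import build_old_policy

# Same configuration as load_model_and_tokenizer() in app/main.py
BASE_CONFIG = {"vocab_size": 50257, "context_length": 1024, "drop_rate": 0.1, "qkv_bias": True}
policy = build_old_policy(base_config=BASE_CONFIG, chosen_model="gpt2-small (124M)", num_classes=2)
policy.load_state_dict(torch.load("./app/models/Spam-Classifier-GPT2-Model.pt", weights_only=True, map_location="cpu"))
policy.eval()

tokenizer = tiktoken.get_encoding("gpt2")
text = "Congratulations! You won $1,000. Click the link below to claim your prize."
token_ids = tokenizer.encode(text)[-policy.pos_emb.num_embeddings:]  # truncate to the context length
with torch.no_grad():
    logits = policy(torch.tensor(token_ids).unsqueeze(0))[:, -1, :]  # class logits at the last position
print("SPAM" if logits.argmax(dim=-1).item() == 1 else "NOT SPAM")
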
app/models/Spam-Classifier-GPT2-Model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5e86fedfc703659ea74a2d636d3c2a7b21d1c07b20cd0f6013cf87638625540
+ size 548173616
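
Note that the roughly 548 MB of model weights are stored as the Git LFS pointer above, so a plain clone only contains the pointer file; the actual Spam-Classifier-GPT2-Model.pt has to be fetched via Git LFS before app/main.py can load it (otherwise the app stops with the missing-file error shown above).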
app/scripts/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # /scripts/__init__.py
+
+ from .modules import *
+
+ __all__ = ["MultiHeadAttention", "LayerNorm", "GELU", "FeedForward", "TransformerBlock", "GPTModel", "build_old_policy"]
app/scripts/modules.py ADDED
@@ -0,0 +1,193 @@
+ # /streamlit/app/scripts/modules.py
+ # Copyright (c) Sebastian Raschka under Apache License 2.0 (see LICENSE.txt).
+ # Source for "Build a Large Language Model From Scratch"
+ #   - https://www.manning.com/books/build-a-large-language-model-from-scratch
+ # Code: https://github.com/rasbt/LLMs-from-scratch
+
+ # This file has been modified by [Brian Perez] for the [Spam_Classifier_Agent] project.
+ # The modifications are licensed under the same Apache License, Version 2.0.
+ import torch
+ import torch.nn as nn
+
+ #####################################
+ # Chapter 3
+ #####################################
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, d_in, d_out, context_length, dropout, num_heads, qkv_bias=False):
+         super().__init__()
+         assert d_out % num_heads == 0, "d_out must be divisible by num_heads"
+
+         self.d_out = d_out
+         self.num_heads = num_heads
+         self.head_dim = d_out // num_heads  # Reduce the projection dim to match the desired output dim
+
+         self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
+         self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
+         self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)
+         self.out_proj = nn.Linear(d_out, d_out)  # Linear layer to combine head outputs
+         self.dropout = nn.Dropout(dropout)
+         self.register_buffer('mask', torch.triu(torch.ones(context_length, context_length), diagonal=1))
+
+     def forward(self, x):
+         b, num_tokens, d_in = x.shape
+
+         keys = self.W_key(x)  # Shape: (b, num_tokens, d_out)
+         queries = self.W_query(x)
+         values = self.W_value(x)
+
+         # We implicitly split the matrix by adding a `num_heads` dimension
+         # Unroll last dim: (b, num_tokens, d_out) -> (b, num_tokens, num_heads, head_dim)
+         keys = keys.view(b, num_tokens, self.num_heads, self.head_dim)
+         values = values.view(b, num_tokens, self.num_heads, self.head_dim)
+         queries = queries.view(b, num_tokens, self.num_heads, self.head_dim)
+
+         # Transpose: (b, num_tokens, num_heads, head_dim) -> (b, num_heads, num_tokens, head_dim)
+         keys = keys.transpose(1, 2)
+         queries = queries.transpose(1, 2)
+         values = values.transpose(1, 2)
+
+         # Compute scaled dot-product attention (aka self-attention) with a causal mask
+         attn_scores = queries @ keys.transpose(2, 3)  # Dot product for each head
+
+         # Original mask truncated to the number of tokens and converted to boolean
+         mask_bool = self.mask.bool()[:num_tokens, :num_tokens]
+
+         # Use the mask to fill attention scores
+         attn_scores.masked_fill_(mask_bool, -torch.inf)
+
+         attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         # Shape: (b, num_tokens, num_heads, head_dim)
+         context_vec = (attn_weights @ values).transpose(1, 2)
+
+         # Combine heads, where self.d_out = self.num_heads * self.head_dim
+         context_vec = context_vec.reshape(b, num_tokens, self.d_out)
+         context_vec = self.out_proj(context_vec)  # optional projection
+
+         return context_vec
+
+ #####################################
+ # Chapter 4
+ #####################################
+ class LayerNorm(nn.Module):
+     def __init__(self, emb_dim):
+         super().__init__()
+         self.eps = 1e-5
+         self.scale = nn.Parameter(torch.ones(emb_dim))
+         self.shift = nn.Parameter(torch.zeros(emb_dim))
+
+     def forward(self, x):
+         mean = x.mean(dim=-1, keepdim=True)
+         var = x.var(dim=-1, keepdim=True, unbiased=False)
+         norm_x = (x - mean) / torch.sqrt(var + self.eps)
+         return self.scale * norm_x + self.shift
+
+
+ class GELU(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         return 0.5 * x * (1 + torch.tanh(
+             torch.sqrt(torch.tensor(2.0 / torch.pi)) *
+             (x + 0.044715 * torch.pow(x, 3))
+         ))
+
+
+ class FeedForward(nn.Module):
+     def __init__(self, cfg):
+         super().__init__()
+         self.layers = nn.Sequential(
+             nn.Linear(cfg["emb_dim"], 4 * cfg["emb_dim"]),
+             GELU(),
+             nn.Linear(4 * cfg["emb_dim"], cfg["emb_dim"]),
+         )
+
+     def forward(self, x):
+         return self.layers(x)
+
+
+ class TransformerBlock(nn.Module):
+     def __init__(self, cfg):
+         super().__init__()
+         self.att = MultiHeadAttention(
+             d_in=cfg["emb_dim"],
+             d_out=cfg["emb_dim"],
+             context_length=cfg["context_length"],
+             num_heads=cfg["n_heads"],
+             dropout=cfg["drop_rate"],
+             qkv_bias=cfg["qkv_bias"])
+         self.ff = FeedForward(cfg)
+         self.norm1 = LayerNorm(cfg["emb_dim"])
+         self.norm2 = LayerNorm(cfg["emb_dim"])
+         self.drop_resid = nn.Dropout(cfg["drop_rate"])
+
+     def forward(self, x):
+         # Shortcut connection for attention block
+         shortcut = x
+         x = self.norm1(x)
+         x = self.att(x)  # Shape [batch_size, num_tokens, emb_size]
+         x = self.drop_resid(x)
+         x = x + shortcut  # Add the original input back
+
+         # Shortcut connection for feed-forward block
+         shortcut = x
+         x = self.norm2(x)
+         x = self.ff(x)
+         x = self.drop_resid(x)
+         x = x + shortcut  # Add the original input back
+
+         return x
+
+
+ class GPTModel(nn.Module):
+     def __init__(self, cfg):
+         super().__init__()
+         self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
+         self.pos_emb = nn.Embedding(cfg["context_length"], cfg["emb_dim"])
+         self.drop_emb = nn.Dropout(cfg["drop_rate"])
+
+         self.trf_blocks = nn.Sequential(
+             *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])
+
+         self.final_norm = LayerNorm(cfg["emb_dim"])
+         self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False)
+
+     def forward(self, in_idx):
+         batch_size, seq_len = in_idx.shape  # (batch_size, num_tokens)
+         tok_embeds = self.tok_emb(in_idx)
+         pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))  # Shape: (seq_len, emb_dim)
+         x = tok_embeds + pos_embeds  # Broadcasting! Resulting shape: [batch_size, num_tokens, emb_size]
+         x = self.drop_emb(x)
+         x = self.trf_blocks(x)
+         x = self.final_norm(x)
+         logits = self.out_head(x)
+         return logits
+
+ def build_old_policy(base_config: dict, chosen_model: str = "gpt2-small (124M)", num_classes: int = 2) -> GPTModel:
+     """Construct the GPT-2 model architecture without loading the pretrained weights. Code adapted from: https://github.com/rasbt/LLMs-from-scratch/blob/main/ch06/01_main-chapter-code/ch06.ipynb
+     Args:
+         base_config (dict): The base configuration of the GPT-2 model (vocab_size, context_length, drop_rate, and qkv_bias).
+         chosen_model (str): The specific GPT-2 variant to construct.
+         num_classes (int): The number of classes in the classification task.
+     Returns:
+         model (GPTModel): The constructed Transformer model for classification."""
+
+     model_configs = {
+         "gpt2-small (124M)": {"emb_dim": 768, "n_layers": 12, "n_heads": 12},
+         "gpt2-medium (355M)": {"emb_dim": 1024, "n_layers": 24, "n_heads": 16},
+         "gpt2-large (774M)": {"emb_dim": 1280, "n_layers": 36, "n_heads": 20},
+         "gpt2-xl (1558M)": {"emb_dim": 1600, "n_layers": 48, "n_heads": 25},
+     }
+
+     base_config.update(model_configs[chosen_model])  # Add emb_dim, n_layers, and n_heads to the config
+
+     model_size = chosen_model.split(" ")[-1].lstrip("(").rstrip(")")
+     allowed_sizes = ("124M", "355M", "774M", "1558M")
+     if model_size not in allowed_sizes:
+         raise ValueError(f"Model size not in {allowed_sizes}")
+     model = GPTModel(base_config)
+
+     model.out_head = torch.nn.Linear(in_features=base_config["emb_dim"], out_features=num_classes)  # Reconfigure the output layer for classification
+     return model
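
Because build_old_policy swaps the vocabulary-sized output head for a num_classes-wide linear layer, the model returns two class logits per token position, and the app keeps only the last position. A minimal shape check (a hypothetical sketch, assuming the app directory is on the import path; no pretrained weights are needed for this):

# shape_check.py — hypothetical sketch, not included in this commit
import torch
from scripts import build_old_policy

cfg = {"vocab_size": 50257, "context_length": 1024, "drop_rate": 0.1, "qkv_bias": True}
model = build_old_policy(base_config=cfg, chosen_model="gpt2-small (124M)", num_classes=2)

dummy_ids = torch.randint(0, cfg["vocab_size"], (1, 16))  # batch of 1 with 16 random token ids
with torch.no_grad():
    out = model(dummy_ids)
print(out.shape)  # torch.Size([1, 16, 2]): one pair of class logits per position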