schoginitoys committed on
Commit 5ebdf85 · verified · 1 Parent(s): a3859d4

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +490 -37

src/streamlit_app.py CHANGED
@@ -1,40 +1,493 @@
- import altair as alt
  import numpy as np
  import pandas as pd
- import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))

+ import streamlit as st
+ st.set_page_config(page_title="GPT-2 Attention Explorer", layout="wide")
+
+ import torch
  import numpy as np
+ from transformers import GPT2TokenizerFast, GPT2Model
+ import seaborn as sns
+ import matplotlib.pyplot as plt
  import pandas as pd
+
+ @st.cache_resource
+ def load_model():
+     tokenizer = GPT2TokenizerFast.from_pretrained("./models")
+     model = GPT2Model.from_pretrained("./models", output_attentions=True, attn_implementation="eager")
+     model.eval()
+     return tokenizer, model
+
+ tokenizer, model = load_model()
+
+ st.title("🧠 GPT-2 Token Inspector + Self-Attention Visualizer")
+
+ with st.expander("📊 GPT-2 Model Architecture Summary"):
+     st.markdown("""
+ - **Vocabulary size (V):** `50257`
+ - **Embedding dimension (d):** `768`
+ - **Max Position Length (L):** `1024`
+ - **Transformer Layers:** `12`
+ - **Attention Heads per Layer:** `12`
+ - **Per-head Dimension (dₖ):** `64`
+ - **Feedforward Hidden Layer Size:** `3072`
+ - **Total Parameters:** ~117 million
+ """)
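+
+ # Optional cross-check (a minimal sketch; the expander label below is illustrative):
+ # the figures quoted above can be read straight from the loaded config. Counting
+ # GPT2Model's parameters directly gives roughly 124M; the commonly quoted ~117M
+ # figure follows the original GPT-2 paper's counting convention.
+ with st.expander("🔎 Cross-check against model.config"):
+     cfg = model.config
+     st.code(
+         f"vocab_size  = {cfg.vocab_size}\n"
+         f"n_embd      = {cfg.n_embd}\n"
+         f"n_positions = {cfg.n_positions}\n"
+         f"n_layer     = {cfg.n_layer}\n"
+         f"n_head      = {cfg.n_head}\n"
+         f"parameters  = {sum(p.numel() for p in model.parameters()):,}"
+     )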
+
+
+ sentence = st.text_input("Enter a sentence:", "The cat sat on the mat")
+
+ if st.button("Analyze & Visualize") and sentence.strip():
+
+     inputs = tokenizer(sentence, return_tensors='pt', return_offsets_mapping=True, return_special_tokens_mask=True)
+     token_ids = inputs['input_ids'][0]
+     tokens = tokenizer.convert_ids_to_tokens(token_ids)
+     position_ids = torch.arange(token_ids.shape[0]).unsqueeze(0)
+
+     inputs.pop("special_tokens_mask", None)
+     inputs.pop("offset_mapping", None)
+
+     with torch.no_grad():
+         outputs = model(**inputs, position_ids=position_ids)
+
+     attentions = outputs.attentions
+     embeddings = outputs.last_hidden_state[0].numpy()
+
+     pos_embedding_layer = model.wpe
+     pos_embeddings = pos_embedding_layer(position_ids).squeeze(0).detach().numpy()
+
+     word_embedding_layer = model.wte
+     word_embeddings = word_embedding_layer(token_ids).detach().numpy()
+
+     final_input = word_embeddings + pos_embeddings
+
+     # 1. BPE Tokens
+     st.subheader("🧾 Byte Pair Encoded Tokens (BPE)")
+     st.markdown("GPT-2 uses **Byte Pair Encoding (BPE)** to split input text into subword units.")
+     st.code(" ".join(tokens))
+
+     # 2. Token IDs
+     st.subheader("🔢 Token IDs")
+     st.markdown("Each token is mapped to an integer ID using the GPT-2 vocabulary.")
+     st.code(token_ids.tolist())
+
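+     # Round-trip check (a small sketch): decoding the IDs reproduces the original
+     # sentence, showing that the ID sequence is a lossless encoding of the text.
+     st.code(f"tokenizer.decode(token_ids) -> {tokenizer.decode(token_ids)!r}")
+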
+     # 3. Word Embeddings
+     st.subheader("💎 Raw Word Embeddings (first 5 tokens)")
+     st.markdown("Each token ID is used to look up a learnable word embedding vector:")
+     st.latex(r"\text{Embedding}(t_i) = \mathbf{E}[t_i]")
+     st.markdown(r"Where $\mathbf{E} \in \mathbb{R}^{V \times d}$ with $V$ = vocab size and $d = 768$.")
+     df_word_embed = pd.DataFrame(word_embeddings[:5])
+     df_word_embed.index = [f"{i}: {tok}" for i, tok in enumerate(tokens[:5])]
+     st.dataframe(df_word_embed.style.format(precision=4))
+
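+     # Lookup check (a minimal sketch): the first embedding row shown above is literally
+     # row token_ids[0] of the wte weight matrix.
+     st.code(
+         f"np.allclose(word_embeddings[0], model.wte.weight[token_ids[0]].detach().numpy()) -> "
+         f"{np.allclose(word_embeddings[0], model.wte.weight[token_ids[0]].detach().numpy())}"
+     )
+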
+     # 4. Positional Encodings
+     st.subheader("🧭 Positional Encodings (first 5 tokens)")
+     st.markdown("GPT-2 adds learned positional vectors from a table indexed by position:")
+     st.latex(r"\text{PosEnc}(i) = \mathbf{P}[i]")
+
+     st.markdown("Example (first 5 positions, first 5 dimensions):")
+     df_pos_example = pd.DataFrame(pos_embeddings[:5, :5],
+                                   columns=[f"dim {i}" for i in range(5)],
+                                   index=[f"{i}: {tok}" for i, tok in enumerate(tokens[:5])])
+     st.dataframe(df_pos_example.style.format(precision=5))
+
+     st.markdown(r"Where $\mathbf{P} \in \mathbb{R}^{L \times d}$ is learned and not sinusoidal in GPT-2.")
+
+     # 5. Final Input Vectors
+     st.subheader("🧮 Final Input = Word Embedding + Positional Encoding")
+     st.markdown("These are the actual vectors passed into the first transformer block:")
+     st.latex(r"\mathbf{X}_i = \text{Embedding}(t_i) + \text{PosEnc}(i)")
+
+     st.markdown("Let's confirm this by showing:")
+     st.code("final_input[i][j] ≈ word_embedding[i][j] + pos_embedding[i][j]")
+
+     for i in range(2):  # for the first 2 tokens
+         df_sum_example = pd.DataFrame({
+             'Word': word_embeddings[i, :5],
+             'PosEnc': pos_embeddings[i, :5],
+             'Final Input': final_input[i, :5],
+             'Word + Pos': word_embeddings[i, :5] + pos_embeddings[i, :5]
+         })
+         df_sum_example.index = [f"dim {j}" for j in range(5)]
+         st.markdown(f"**Token {i}: `{tokens[i]}`**")
+         st.dataframe(df_sum_example.style.format(precision=5))
+
+     # 6. Output Embeddings
+     st.subheader("📐 Output Embedding Vectors (first 5 tokens)")
+     st.markdown("These are the final hidden states after passing through all transformer layers:")
+     st.latex(r"\text{Output}_i = \text{TransformerLayers}(\mathbf{X}_i)")
+
+     df_embed_example = pd.DataFrame(embeddings[:5, :5],
+                                     columns=[f"dim {j}" for j in range(5)],
+                                     index=[f"{i}: {tok}" for i, tok in enumerate(tokens[:5])])
+     st.dataframe(df_embed_example.style.format(precision=5))
+
+     st.markdown("📌 These are **not** equal to the input vectors—they are fully context-aware representations!")
+
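+     # Quick numeric backing (a minimal sketch) for the claim above: the output hidden
+     # states really do differ from the summed input embeddings.
+     st.code(f"np.allclose(embeddings, final_input) -> {np.allclose(embeddings, final_input)}")
+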
+     # 🔄 Layer/head selectors, placed just above the heatmap
+     layer_num = st.slider("Select Transformer Layer", 0, model.config.n_layer - 1, 0)
+     head_num = st.slider("Select Attention Head", 0, model.config.n_head - 1, 0)
+     attn = attentions[layer_num][0, head_num].numpy()
+
+     # 7. Attention Heatmap
+     st.subheader(f"🎯 Attention Heatmap — Layer {layer_num+1}, Head {head_num+1}")
+     st.markdown("This shows how each token attends to others in the sequence:")
+     st.latex(r"\text{Attention}(Q, K, V) = \text{softmax} \left( \frac{QK^\top}{\sqrt{d_k}} \right) V")
+     fig, ax = plt.subplots(figsize=(8, 6))
+     sns.heatmap(attn, xticklabels=tokens, yticklabels=tokens, cmap="YlOrRd", annot=True, fmt=".2f", ax=ax)
+     ax.set_xlabel("Key Tokens")
+     ax.set_ylabel("Query Tokens")
+     st.pyplot(fig)
+
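+     # Sanity check (a minimal sketch): each row of the attention matrix is a probability
+     # distribution over keys, and GPT-2's causal mask keeps queries from attending to
+     # later positions (upper triangle ≈ 0).
+     st.code(
+         f"rows sum to 1: {np.allclose(attn.sum(axis=-1), 1.0, atol=1e-5)}\n"
+         f"upper triangle ≈ 0: {np.allclose(np.triu(attn, k=1), 0.0, atol=1e-5)}"
+     )
+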
+     # 8. Attention Head Breakdown (for token 0)
+     st.subheader("🔍 Attention Head Breakdown (1 Token)")
+
+     st.markdown("Let's inspect how **GPT-2 computes attention for a single token** (first token in the sequence).")
+
+     # Fetch the combined Q, K, V projection from the model's first block
+     # block = model.transformer.h[0]  # use this path when the model is GPT2LMHeadModel
+     block = model.h[0]  # ✅ correct for GPT2Model (layer 0)
+
+     # c_attn is a Conv1D layer: its weight is stored as (in_dim, out_dim) = (768, 2304),
+     # so no transpose is needed before right-multiplying the input.
+     W_qkv = block.attn.c_attn.weight.detach().numpy()  # shape (768, 2304)
+     b_qkv = block.attn.c_attn.bias.detach().numpy()  # shape (2304,)
+
+     # Final input for token 0
+     x0 = final_input[0]  # shape (768,)
+
+     # Linear projection for Q, K, V
+     qkv = x0 @ W_qkv + b_qkv  # shape (2304,)
+     Q, K, V = np.split(qkv, 3)
+
+     # Show Q, K, V for head 0
+     Q0 = Q[:64]
+     K0_all = K.reshape(12, 64)  # one 64-dim slice per head
+     V0_all = V.reshape(12, 64)
+
+     K0 = K0_all[0]
+     V0 = V0_all[0]
+
+     # Dot product and softmax (a single token attending only to itself)
+     score = Q0 @ K0  # scalar
+     scaled_score = score / np.sqrt(64)
+     softmax_weight = np.exp(scaled_score) / np.sum(np.exp(scaled_score))  # = 1.0 for a single key
+
+     attn_output = softmax_weight * V0  # simulated for 1 token self-attending to itself
+
+     st.markdown("### Formula Recap")
+
+     st.latex(r"Q = x W^Q,\quad K = x W^K,\quad V = x W^V")
+
+     st.latex(r"\text{Attention}(Q, K, V) = \text{softmax}\left(\frac{QK^\top}{\sqrt{d_k}}\right)V")
+
+     # Show Q0, K0, softmax and V0
+     df_breakdown = pd.DataFrame({
+         "Q₀": Q0,
+         "K₀": K0,
+         "Q₀·K₀": Q0 * K0,
+         "V₀": V0,
+         "AttnOut": attn_output
+     })
+     df_breakdown.index = [f"dim {i}" for i in range(64)]
+     st.dataframe(df_breakdown.style.format(precision=5))
+
+     st.markdown("### 🧮 Self-Attention Matrix Shape Annotations")
+
+     st.markdown("""
+ **Key tensor dimensions involved in attention computation:**
+
+ - `W_qkv`: **(768, 2304)** – learned projection matrix for Q, K, V combined
+ - `b_qkv`: **(2304,)** – bias vector
+ - `X`: **(5, 768)** – input vectors for 5 tokens
+ - `qkv_all = X @ W_qkv + b_qkv`: → **(5, 2304)**
+ - `Q_all, K_all, V_all = np.split(qkv_all, 3, axis=1)`: → each **(5, 768)**
+ - `Q0, K0, V0 = [:, :64]`: head 0 slice → **(5, 64)**
+ - `q0 @ K0.T`: **(1, 64) × (64, 5)** → **(1, 5)**
+ - `softmax_weights`: **(1, 5)**
+ - `attn_output = softmax_weights @ V0`: **(1, 64)**
+ """)
+
+     # 9. Matrix-Level Self-Attention (Token 0 → All)
+     st.subheader("🔬 Matrix-Level Self-Attention (Token 0 → All)")
+
+     st.markdown("""
+ This section shows how **Token 0** attends to all other tokens using matrix-level self-attention.
+ We compute the dot products, apply softmax, and produce the output for head 0 in layer 0.
+ (For clarity this skips the block's LayerNorm (`ln_1`) and the causal mask, so the weights below
+ are illustrative and will differ from the heatmap above, where token 0 can only attend to itself.)
+ """)
+
+     # Use the same block as above
+     block = model.h[0]
+     W_qkv = block.attn.c_attn.weight.detach().numpy()  # (768, 2304)
+     b_qkv = block.attn.c_attn.bias.detach().numpy()  # (2304,)
+
+     X = final_input[:5]  # (5, 768) – first 5 tokens (assumes the sentence has at least 5)
+
+     # Compute Q, K, V for all 5 tokens
+     qkv_all = X @ W_qkv + b_qkv  # (5, 768) @ (768, 2304) → (5, 2304)
+
+     Q_all, K_all, V_all = np.split(qkv_all, 3, axis=1)
+
+     # Head 0 slices
+     Q0 = Q_all[:, :64]  # (5, 64)
+     K0 = K_all[:, :64]  # (5, 64)
+     V0 = V_all[:, :64]  # (5, 64)
+
+     # Compute raw attention scores for token 0
+     q0 = Q0[0].reshape(1, 64)  # (1, 64)
+     attn_scores = q0 @ K0.T  # (1, 5)
+     scaled_scores = attn_scores / np.sqrt(64)
+     softmax_weights = np.exp(scaled_scores)
+     softmax_weights /= softmax_weights.sum(axis=-1, keepdims=True)  # shape (1, 5)
+
+     # Weighted sum of V0 rows
+     attn_output_0 = softmax_weights @ V0  # (1, 64)
+
+     # Display matrices
+     st.markdown("### Raw Scaled Attention Scores (Q₀Kᵀ / √dₖ):")
+     df_scores = pd.DataFrame(scaled_scores[0], columns=["Score"], index=[f"Token {i}" for i in range(5)])
+     st.dataframe(df_scores.style.format(precision=5))
+
+     st.markdown("### Softmax Attention Weights αᵢ:")
+     df_weights = pd.DataFrame(softmax_weights[0], columns=["Weight αᵢ"], index=[f"Token {i}" for i in range(5)])
+     st.dataframe(df_weights.style.format(precision=5))
+
+     st.markdown("### Value Vᵢ vectors (Head 0, first 5 dims):")
+     df_values = pd.DataFrame(V0[:, :5], columns=[f"dim {i}" for i in range(5)],
+                              index=[f"Token {i}" for i in range(5)])
+     st.dataframe(df_values.style.format(precision=5))
+
+     st.markdown("### Final Attention Output (weighted sum of Vᵢ):")
+     df_attn_out = pd.DataFrame(attn_output_0[:, :5], columns=[f"dim {i}" for i in range(5)],
+                                index=["AttnOut₀"])
+     st.dataframe(df_attn_out.style.format(precision=5))
+
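+     # Batched multi-head view (a minimal sketch of the "head reshaping" noted in the shape
+     # annotations): the same qkv_all tensor can be reshaped so all 12 heads are scored at
+     # once; scores_all_heads[0, 0] matches the token-0 scaled scores shown above.
+     n_tok = X.shape[0]
+     Q_heads = Q_all.reshape(n_tok, 12, 64).transpose(1, 0, 2)  # (12, n_tok, 64)
+     K_heads = K_all.reshape(n_tok, 12, 64).transpose(1, 0, 2)  # (12, n_tok, 64)
+     scores_all_heads = Q_heads @ K_heads.transpose(0, 2, 1) / np.sqrt(64)  # (12, n_tok, n_tok)
+     st.code(f"per-head score tensor shape: {scores_all_heads.shape}  # (heads, queries, keys)")
+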
+
+     # 10. Per-Head Projection Matrices
+     st.subheader("🧬 Per-Head Projection Matrices (Wq, Wk, Wv)")
+
+     st.markdown("""
+ In GPT-2, each attention **head has its own set of projection weights** to compute Queries (Q), Keys (K), and Values (V) from the input vector.
+
+ The full `W_qkv` layer maps from **(768,) → (2304,)** and is split into 3 parts:
+ - `Wq` = first 768 columns → shape `(768, 768)`
+ - `Wk` = next 768 columns → shape `(768, 768)`
+ - `Wv` = last 768 columns → shape `(768, 768)`
+
+ Each head receives a unique slice from each projection:
+ - 12 heads × 64 dimensions = 768
+ - So head 0 → `Wq[:, :64]`, head 1 → `Wq[:, 64:128]`, etc.
+ """)
+
+     block = model.h[0]
+     W_qkv_full = block.attn.c_attn.weight.detach().numpy()  # (768, 2304); Conv1D already stores (in_dim, out_dim), so no transpose
+     W_q, W_k, W_v = np.split(W_qkv_full, 3, axis=1)  # each: (768, 768)
+
+     # Show Wq head 0 and 1
+     Wq_head0 = W_q[:, :64]
+     Wq_head1 = W_q[:, 64:128]
+
+     df_q = pd.DataFrame({
+         "Wq_head0": Wq_head0[:5, 0],
+         "Wq_head1": Wq_head1[:5, 0]
+     }, index=[f"dim {i}" for i in range(5)])
+     st.markdown("### Wq projection weights for head 0 vs head 1 (first 5 input dims → output dim 0):")
+     st.dataframe(df_q.style.format(precision=5))
+
+     # Show Wk and Wv for head 0
+     Wk_head0 = W_k[:, :64]
+     Wv_head0 = W_v[:, :64]
+
+     df_kv = pd.DataFrame({
+         "Wk_head0": Wk_head0[:5, 0],
+         "Wv_head0": Wv_head0[:5, 0]
+     }, index=[f"dim {i}" for i in range(5)])
+     st.markdown("### Wk and Wv projection weights for head 0 (first 5 input dims → output dim 0):")
+     st.dataframe(df_kv.style.format(precision=5))
+
+     st.markdown("""
+ ✅ This confirms that each head has **distinct projections** for Q, K, and V.
+ The same input `x` is transformed differently per head, allowing GPT-2 to learn different attention perspectives.
+ """)
+
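+     # Consistency check (a minimal sketch): projecting token 0 with head 0's Wq slice
+     # reproduces the first 64 query dimensions obtained earlier from the combined W_qkv.
+     q0_from_slice = x0 @ W_q[:, :64] + b_qkv[:64]
+     st.code(f"np.allclose(q0_from_slice, Q0[0]) -> {np.allclose(q0_from_slice, Q0[0])}")
+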
+
+     # 11. 📐 How W_qkv Projects an Input Vector into Q, K, V
+     st.subheader("📐 How W_qkv Projects an Input Vector → Q, K, V")
+
+     st.markdown("""
+ In GPT-2, the combined projection layer `c_attn` maps a single input embedding
+ into a concatenated vector that contains **Q, K, and V**.
+
+ Each of these is 768-dimensional, so the full output is 768 × 3 = 2304.
+ """)
+
+     st.latex(r"x \in \mathbb{R}^{768} \quad \rightarrow \quad [Q \;|\; K \;|\; V] \in \mathbb{R}^{2304}")
+
+     st.markdown("---")
+
+     st.markdown("### 🧪 Mini GPT Example (3D → 6D Projection)")
+
+     st.markdown("Imagine a tiny model:")
+
+     st.markdown("""
+ - Input vector `x ∈ ℝ³`
+ - Q, K, V are each 2D → total output = 6D
+ - Thus:
+ """)
+
+     st.latex(r"W_{\text{qkv}} \in \mathbb{R}^{6 \times 3}, \quad b_{\text{qkv}} \in \mathbb{R}^6")
+
+     # Miniature input vector and projection weights
+     mini_x = np.array([1.0, 2.0, 3.0])  # (3,)
+     mini_W = np.array(  # (6, 3)
+         [
+             [0.1, 0.2, 0.3],  # → Q₁
+             [0.4, 0.5, 0.6],  # → Q₂
+             [0.7, 0.8, 0.9],  # → K₁
+             [1.0, 1.1, 1.2],  # → K₂
+             [1.3, 1.4, 1.5],  # → V₁
+             [1.6, 1.7, 1.8],  # → V₂
+         ]
+     )
+     mini_b = np.array([0.01, 0.02, 0.03, 0.04, 0.05, 0.06])  # (6,)
+
+     mini_out = mini_W @ mini_x + mini_b  # (6,)
+     Qm, Km, Vm = np.split(mini_out, 3)  # each (2,)
+
+     st.code("Input vector x = [1.0, 2.0, 3.0]  # shape (3,)")
+     st.code("W_qkv shape = (6, 3)  # maps 3 → 6")
+
+     st.code(f"Output = W_qkv @ x + b = {mini_out.round(2).tolist()}")
+
+     df_mini = pd.DataFrame(
+         {
+             "Q": Qm.round(2),
+             "K": Km.round(2),
+             "V": Vm.round(2)
+         },
+         index=["dim 1", "dim 2"]
+     )
+
+     st.markdown("**Split into Q, K, V (each 2D):**")
+     st.dataframe(df_mini.style.format(precision=2))
+
+     st.markdown("---")
+
+     st.markdown("### 📏 Real GPT-2 Projection Shapes")
+
+     df_shapes = pd.DataFrame({
+         "Tensor": [
+             "Input x",
+             "W_qkv (linear layer)",
+             "b_qkv (bias)",
+             "Output = x @ W_qkv + b",
+             "Q / K / V each",
+             "Head reshaping"
+         ],
+         "Shape": [
+             "(768,)",
+             "(768, 2304)",
+             "(2304,)",
+             "(2304,)",
+             "(768,)",
+             "12 heads × 64 dims = 768"
+         ]
+     })
+     st.dataframe(df_shapes)
+
+     st.markdown("""
+ Each attention **head** gets its own slice:
+ - Q_head₀ = Q[:, :64]
+ - K_head₀ = K[:, :64]
+ - V_head₀ = V[:, :64]
+
+ That's how one input vector creates multi-headed Q, K, and V for scaled dot-product attention.
+ """)
+
+
+     st.subheader("Additional notes:")
+     st.markdown(
+         """
+ ---
+
+ ## 🧠 What Does `Ġ` Mean?
+
+ The character `Ġ` (U+0120, Latin Capital Letter G with Dot Above) is used to:
+
+ > **Represent a leading space** before the token.
+
+ ---
+
+ ### ✅ Example:
+
+ Let's look at a sentence:
+
+ ```
+ "The cat sat on the mat"
+ ```
+
+ When tokenized with the GPT-2 tokenizer (`GPT2TokenizerFast`), it becomes:
+
+ ```
+ ['The', 'Ġcat', 'Ġsat', 'Ġon', 'Ġthe', 'Ġmat']
+ ```
+
+ * `'The'` → First word, no leading space.
+ * `'Ġcat'` → Space + "cat"
+ * `'Ġsat'` → Space + "sat"
+ * etc.
+
+ So `Ġ` means:
+
+ > "This token starts after a space."
+
+ ---
+
+ ### ⚠️ Why Not Just Use `" "`?
+
+ Because GPT-2 uses a **byte-level BPE vocabulary of subword units**, whose tokens are stored as plain strings. Whitespace bytes are remapped to printable characters (space → `Ġ`) so that every token stays a visible string and the merge process never has to treat a raw space specially. So:
+
+ * `Ġ` = internal marker used in the vocabulary file
+ * It is not a literal space character; it tells the tokenizer "insert a space before this token when decoding."
+
+ ---
+
+ ### ✅ When Detokenizing
+
+ The tokenizer **turns the `Ġ` back into a space** during decoding:
+
+ ```python
+ from transformers import GPT2TokenizerFast
+
+ tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
+
+ tokens = tokenizer.tokenize("The cat sat on the mat")
+ print(tokens)
+ # ['The', 'Ġcat', 'Ġsat', 'Ġon', 'Ġthe', 'Ġmat']
+
+ ids = tokenizer.convert_tokens_to_ids(tokens)
+ decoded = tokenizer.decode(ids)
+ print(decoded)
+ # 'The cat sat on the mat'
+ ```
+
+ ---
+
+ ## ✅ Summary
+
+ | Token    | Decodes As                |
+ | -------- | ------------------------- |
+ | `'The'`  | `'The'` (no space before) |
+ | `'Ġcat'` | `' cat'`                  |
+ | `'Ġsat'` | `' sat'`                  |
+ | `'Ġon'`  | `' on'`                   |
+ | `'Ġthe'` | `' the'`                  |
+ | `'Ġmat'` | `' mat'`                  |
+ """)
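+
+     # Byte-level detail (a minimal sketch): GPT-2's byte-to-unicode table maps the space
+     # byte (0x20) to the printable stand-in U+0120, which is exactly 'Ġ'.
+     st.code(f"chr(ord(' ') + 0x100) == 'Ġ'  ->  {chr(ord(' ') + 0x100) == 'Ġ'}")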