ibrahimmkhalid committed on
Commit 5552ab6 · 1 Parent(s): 09b0ea4

add basic BiGram testing

bigram_testing.sync.ipynb ADDED
@@ -0,0 +1,374 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "864d6f55",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "cuda\n"
+ ]
+ }
+ ],
+ "source": [
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "from torch.nn import functional as F\n",
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+ "print(device)\n",
+ "block_size = 8\n",
+ "batch_size = 4\n",
+ "max_iters = 100000\n",
+ "learning_rate = 3e-4\n",
+ "eval_every = 5000"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "b056ad8b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "THE SONNETS\n",
+ "\n",
+ " 1\n",
+ "\n",
+ "From fairest creatures we desire increase,\n",
+ "That thereby beauty’s rose might never die,\n",
+ "But as the riper should by time decease,\n",
+ "His tender heir might bear his memory:\n",
+ "But thou contracted to thine own bright eyes,\n",
+ "Feed’st thy light’s flame with self-substantial fuel,\n",
+ "Making a famine where abundance lies,\n",
+ "Thyself thy foe, to thy sweet self too cruel:\n",
+ "Thou that art now the world’s fresh ornament,\n",
+ "And only herald to the gaudy spring,\n",
+ "Within thine own bud buriest \n"
+ ]
+ }
+ ],
+ "source": [
+ "with open(\"shakespeare.txt\") as f:\n",
+ "    text = f.read()\n",
+ "print(text[:500])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "b539c516",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chars = sorted(set(text))\n",
+ "vocab_size = len(chars)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "1a6ecfab",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Vocab size: 101\n",
+ "Text length: 5357910\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f\"Vocab size: {vocab_size}\")\n",
+ "print(f\"Text length: {len(text)}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "4d12c432",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "string_to_int = {ch: i for i, ch in enumerate(chars)}\n",
+ "int_to_string = {i: ch for i, ch in enumerate(chars)}\n",
+ "\n",
+ "encode = lambda s: [string_to_int[ch] for ch in s]\n",
+ "decode = lambda x: ''.join([int_to_string[i] for i in x])\n",
+ "\n",
+ "data = torch.tensor(encode(text), dtype=torch.long, device=device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "2ee10568",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n = int(0.8 * len(data))\n",
+ "train_data = data[:n]\n",
+ "val_data = data[n:]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "f8d9963a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_batch(split):\n",
+ "    data = train_data if split == 'train' else val_data\n",
+ "    ix = torch.randint(len(data) - block_size, (batch_size,))\n",
+ "    x = torch.stack([data[i:i+block_size] for i in ix])\n",
+ "    y = torch.stack([data[i+1:i+block_size+1] for i in ix])\n",
+ "    x, y = x.to(device), y.to(device)\n",
+ "    return x, y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "31a09e9f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x, y = get_batch('train')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "65b12427",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "class BigramLanguageModel(nn.Module):\n",
+ "    def __init__(self, vocab_size):\n",
+ "        super().__init__()\n",
+ "        self.token_embedding_table = nn.Embedding(vocab_size, vocab_size)\n",
+ "\n",
+ "    def forward(self, index, targets=None):\n",
+ "        logits = self.token_embedding_table(index)\n",
+ "        if targets is None:\n",
+ "            loss = None\n",
+ "        else:\n",
+ "            B, T, C = logits.shape\n",
+ "            logits = logits.view(B*T, C) # reshape to what torch.cross_entropy expects\n",
+ "            targets = targets.view(B*T)\n",
+ "            loss = F.cross_entropy(logits, targets)\n",
+ "        return logits, loss\n",
+ "    def generate(self, index, max_new_tokens):\n",
+ "        # index is (B, T) array of indices in the current context\n",
+ "        for _ in range(max_new_tokens):\n",
+ "            # get the predictions\n",
+ "            logits, loss = self.forward(index)\n",
+ "            # focus only on the last time step\n",
+ "            logits = logits[:, -1, :] # becomes (B, C)\n",
+ "            # apply softmax to get probabilities\n",
+ "            probs = F.softmax(logits, dim=-1) # (B, C)\n",
+ "            # sample from the distribution\n",
+ "            index_next = torch.multinomial(probs, num_samples=1) # (B, 1)\n",
+ "            # append sampled index to the running sequence\n",
+ "            index = torch.cat((index, index_next), dim=1) # (B, T+1)\n",
+ "        return index"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "38c1fd2b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\txSV……VF'3aFë-,7Xy*KV4RW6S7qêë]Kp:v—ç2G'…F*R—P612O‘dÀ5I7d:hMuaPB,AHAnSp3DhÇ8Æm?‘” -9IWÉi b,p925Lp2)z\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = BigramLanguageModel(vocab_size).to(device)\n",
+ "\n",
+ "context = torch.zeros((1,1), dtype=torch.long, device=device)\n",
+ "generated_chars = decode(model.generate(context, max_new_tokens=100)[0].tolist())\n",
+ "print(generated_chars)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4a168e17",
+ "metadata": {},
+ "source": [
+ "\n",
+ "### Some common loss functions and optimizers\n",
+ "1. **Mean Squared Error (MSE)**: MSE is a common loss function for regression problems, where the goal is to predict a continuous output. It measures the average squared difference between the predicted and actual values and is often used to train neural networks for regression tasks.\n",
+ "2. **Gradient Descent (GD)**: GD is an optimization algorithm used to minimize the loss function of a machine learning model. The loss function measures how well the model predicts the target variable from the input features; GD iteratively adjusts the model parameters in the direction of steepest descent of the loss function.\n",
+ "3. **Momentum**: Momentum is an extension of stochastic gradient descent (SGD) that adds a \"momentum\" term to the parameter updates. This term smooths out the updates and lets the optimizer keep moving in the right direction even when the gradient changes direction or varies in magnitude. Momentum is particularly useful for training deep neural networks.\n",
+ "4. **RMSprop**: RMSprop is an optimization algorithm that uses a moving average of the squared gradient to adapt the learning rate of each parameter. This helps avoid oscillations in the parameter updates and can improve convergence in some cases.\n",
+ "5. **Adam**: Adam is a popular optimization algorithm that combines the ideas of momentum and RMSprop. It uses moving averages of both the gradient and its square to adapt the learning rate of each parameter. Adam is often used as a default optimizer for deep learning models.\n",
+ "6. **AdamW**: AdamW is a modification of Adam that applies weight decay directly to the parameter updates. This helps regularize the model and can improve generalization. We will use AdamW here, as it best suits the model trained in this notebook.\n",
+ "\n",
+ "Find more optimizers and details in the torch.optim module."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "547c740d",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Iter 0:\n",
+ "5.172138690948486\n",
+ "Iter 5000:\n",
+ "4.190489768981934\n",
+ "Iter 10000:\n",
+ "3.747365713119507\n",
+ "Iter 15000:\n",
+ "3.188748359680176\n",
+ "Iter 20000:\n",
+ "2.9176113605499268\n",
+ "Iter 25000:\n",
+ "2.900815963745117\n",
+ "Iter 30000:\n",
+ "2.2446277141571045\n",
+ "Iter 35000:\n",
+ "2.5964982509613037\n",
+ "Iter 40000:\n",
+ "2.225098133087158\n",
+ "Iter 45000:\n",
+ "2.517181158065796\n",
+ "Iter 50000:\n",
+ "2.586806535720825\n",
+ "Iter 55000:\n",
+ "2.4346859455108643\n",
+ "Iter 60000:\n",
+ "2.952392816543579\n",
+ "Iter 65000:\n",
+ "2.6527769565582275\n",
+ "Iter 70000:\n",
+ "2.369570016860962\n",
+ "Iter 75000:\n",
+ "2.434110641479492\n",
+ "Iter 80000:\n",
+ "2.4495511054992676\n",
+ "Iter 85000:\n",
+ "2.4248077869415283\n",
+ "Iter 90000:\n",
+ "2.6163671016693115\n",
+ "Iter 95000:\n",
+ "2.5076956748962402\n",
+ "2.6161372661590576\n"
+ ]
+ }
+ ],
+ "source": [
+ "optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
+ "\n",
+ "for iter in range(max_iters):\n",
+ "    # sample a batch\n",
+ "    xb, yb = get_batch('train')\n",
+ "\n",
+ "    # evaluate the loss\n",
+ "    logits, loss = model.forward(xb, yb)\n",
+ "    optimizer.zero_grad(set_to_none=True)\n",
+ "    loss.backward()\n",
+ "    optimizer.step()\n",
+ "\n",
+ "    if iter % eval_every == 0:\n",
+ "        print(f\"Iter {iter}:\")\n",
+ "        print(loss.item())\n",
+ "print(loss.item())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "64e938b2",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\txsunt bly? fl ed s.\n",
+ "Mulyowave br, chud nllyon wdiman nd st chapeed,\n",
+ "Anoouscelllend mor yo hesspethe \n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "context = torch.zeros((1,1), dtype=torch.long, device=device)\n",
+ "generated_chars = decode(model.generate(context, max_new_tokens=100)[0].tolist())\n",
+ "print(generated_chars)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ },
+ "varInspector": {
+ "cols": {
+ "lenName": 16,
+ "lenType": 16,
+ "lenVar": 40
+ },
+ "kernels_config": {
+ "python": {
+ "delete_cmd_postfix": "",
+ "delete_cmd_prefix": "del ",
+ "library": "var_list.py",
+ "varRefreshCmd": "print(var_dic_list())"
+ },
+ "r": {
+ "delete_cmd_postfix": ") ",
+ "delete_cmd_prefix": "rm(",
+ "library": "var_list.r",
+ "varRefreshCmd": "cat(var_dic_list()) "
+ }
+ },
+ "types_to_exclude": [
+ "module",
+ "function",
+ "builtin_function_or_method",
+ "instance",
+ "_Feature"
+ ],
+ "window_display": false
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
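Note: the notebook carves off val_data and defines eval_every, but the training loop above only prints the loss of the most recent training batch, so the validation split is never actually consulted. Below is a minimal sketch (not part of this commit) of a smoothed train/val evaluation helper that could be called every eval_every iterations; it assumes the notebook's model and get_batch are in scope, and eval_iters is an illustrative parameter introduced here.

@torch.no_grad()
def estimate_loss(eval_iters=200):
    # eval_iters is hypothetical; it sets how many random batches to average over
    out = {}
    model.eval()
    for split in ['train', 'val']:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            xb, yb = get_batch(split)   # reuse the notebook's batch sampler
            _, loss = model(xb, yb)     # forward pass returns (logits, loss)
            losses[k] = loss.item()
        out[split] = losses.mean().item()
    model.train()
    return out

Printing estimate_loss() inside the "if iter % eval_every == 0" branch would give far less noisy numbers than the single-batch losses shown in the output above.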
bigram_testing.sync.py ADDED
@@ -0,0 +1,139 @@
+ # ---
+ # jupyter:
+ # jupytext:
+ # text_representation:
+ # extension: .py
+ # format_name: percent
+ # format_version: '1.3'
+ # jupytext_version: 1.3.4
+ # kernelspec:
+ # display_name: Python 3
+ # language: python
+ # name: python3
+ # ---
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(device)
+ block_size = 8
+ batch_size = 4
+ max_iters = 100000
+ learning_rate = 3e-4
+ eval_every = 5000
+
+ # %%
+ with open("shakespeare.txt") as f:
+     text = f.read()
+ print(text[:500])
+
+ # %%
+ chars = sorted(set(text))
+ vocab_size = len(chars)
+
+ # %%
+ print(f"Vocab size: {vocab_size}")
+ print(f"Text length: {len(text)}")
+
+ # %%
+ string_to_int = {ch: i for i, ch in enumerate(chars)}
+ int_to_string = {i: ch for i, ch in enumerate(chars)}
+
+ encode = lambda s: [string_to_int[ch] for ch in s]
+ decode = lambda x: ''.join([int_to_string[i] for i in x])
+
+ data = torch.tensor(encode(text), dtype=torch.long, device=device)
+
+
+ # %%
+ n = int(0.8 * len(data))
+ train_data = data[:n]
+ val_data = data[n:]
+
+ # %%
+ def get_batch(split):
+     data = train_data if split == 'train' else val_data
+     ix = torch.randint(len(data) - block_size, (batch_size,))
+     x = torch.stack([data[i:i+block_size] for i in ix])
+     y = torch.stack([data[i+1:i+block_size+1] for i in ix])
+     x, y = x.to(device), y.to(device)
+     return x, y
+
+ # %%
+ x, y = get_batch('train')
+
+ # %%
+
+ class BigramLanguageModel(nn.Module):
+     def __init__(self, vocab_size):
+         super().__init__()
+         self.token_embedding_table = nn.Embedding(vocab_size, vocab_size)
+
+     def forward(self, index, targets=None):
+         logits = self.token_embedding_table(index)
+         if targets is None:
+             loss = None
+         else:
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C) # reshape to what torch.cross_entropy expects
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+         return logits, loss
+     def generate(self, index, max_new_tokens):
+         # index is (B, T) array of indices in the current context
+         for _ in range(max_new_tokens):
+             # get the predictions
+             logits, loss = self.forward(index)
+             # focus only on the last time step
+             logits = logits[:, -1, :] # becomes (B, C)
+             # apply softmax to get probabilities
+             probs = F.softmax(logits, dim=-1) # (B, C)
+             # sample from the distribution
+             index_next = torch.multinomial(probs, num_samples=1) # (B, 1)
+             # append sampled index to the running sequence
+             index = torch.cat((index, index_next), dim=1) # (B, T+1)
+         return index
+
+ # %%
+ model = BigramLanguageModel(vocab_size).to(device)
+
+ context = torch.zeros((1,1), dtype=torch.long, device=device)
+ generated_chars = decode(model.generate(context, max_new_tokens=100)[0].tolist())
+ print(generated_chars)
+
+ # %% [markdown]
+ #
+ # ### Some common loss functions and optimizers
+ # 1. **Mean Squared Error (MSE)**: MSE is a common loss function for regression problems, where the goal is to predict a continuous output. It measures the average squared difference between the predicted and actual values and is often used to train neural networks for regression tasks.
+ # 2. **Gradient Descent (GD)**: GD is an optimization algorithm used to minimize the loss function of a machine learning model. The loss function measures how well the model predicts the target variable from the input features; GD iteratively adjusts the model parameters in the direction of steepest descent of the loss function.
+ # 3. **Momentum**: Momentum is an extension of stochastic gradient descent (SGD) that adds a "momentum" term to the parameter updates. This term smooths out the updates and lets the optimizer keep moving in the right direction even when the gradient changes direction or varies in magnitude. Momentum is particularly useful for training deep neural networks.
+ # 4. **RMSprop**: RMSprop is an optimization algorithm that uses a moving average of the squared gradient to adapt the learning rate of each parameter. This helps avoid oscillations in the parameter updates and can improve convergence in some cases.
+ # 5. **Adam**: Adam is a popular optimization algorithm that combines the ideas of momentum and RMSprop. It uses moving averages of both the gradient and its square to adapt the learning rate of each parameter. Adam is often used as a default optimizer for deep learning models.
+ # 6. **AdamW**: AdamW is a modification of Adam that applies weight decay directly to the parameter updates. This helps regularize the model and can improve generalization. We will use AdamW here, as it best suits the model trained in this notebook.
+
+ # Find more optimizers and details in the torch.optim module.
115
+
116
+ # %%
117
+ optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
118
+
119
+ for iter in range(max_iters):
120
+ # sample a batch
121
+ xb, yb = get_batch('train')
122
+
123
+ # evaluate the loss
124
+ logits, loss = model.forward(xb, yb)
125
+ optimizer.zero_grad(set_to_none=True)
126
+ loss.backward()
127
+ optimizer.step()
128
+
129
+ if iter % eval_every == 0:
130
+ print(f"Iter {iter}:")
131
+ print(loss.item())
132
+ print(loss.item())
133
+
134
+ # %%
135
+
136
+ context = torch.zeros((1,1), dtype=torch.long, device=device)
137
+ generated_chars = decode(model.generate(context, max_new_tokens=100)[0].tolist())
138
+ print(generated_chars)
139
+
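Note: both generation cells seed model.generate with torch.zeros((1,1)), i.e. the single token with index 0 (a tab character in this vocabulary, which is why the samples above begin with a tab). A minimal sketch, again not part of this commit, of seeding generation from a prompt string instead, assuming the model, encode, decode, and device defined above are in scope; the prompt text is purely illustrative.

prompt = "Shall I compare thee"  # illustrative; must only use characters present in the corpus
context = torch.tensor([encode(prompt)], dtype=torch.long, device=device)  # shape (1, len(prompt))
generated = model.generate(context, max_new_tokens=100)[0].tolist()
print(decode(generated))

Since the bigram model conditions on only the last token, the prompt changes just the first sampled character, but the same seeding pattern carries over unchanged once longer-context models are added.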
testing.sync.ipynb DELETED
@@ -1,103 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "b056ad8b",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "THE SONNETS\n",
- "\n",
- " 1\n",
- "\n",
- "From fairest creatures we desire increase,\n",
- "That thereby beauty’s rose might never die,\n",
- "But as the riper should by time decease,\n",
- "His tender heir might bear his memory:\n",
- "But thou contracted to thine own bright eyes,\n",
- "Feed’st thy light’s flame with self-substantial fuel,\n",
- "Making a famine where abundance lies,\n",
- "Thyself thy foe, to thy sweet self too cruel:\n",
- "Thou that art now the world’s fresh ornament,\n",
- "And only herald to the gaudy spring,\n",
- "Within thine own bud buriest thy content,\n",
- "And, tender churl, mak’st waste in niggarding:\n",
- " Pity the world, or else this glutton be,\n",
- " To eat the world’s due, by the grave and thee.\n",
- "\n",
- "\n",
- " 2\n",
- "\n",
- "When forty winters shall besiege thy brow,\n",
- "And dig deep trenches in thy beauty’s field,\n",
- "Thy youth’s proud livery so gazed on now,\n",
- "Will be a tattered weed of small worth held:\n",
- "Then being asked, where all thy beauty lies,\n",
- "Where all the treasure of thy lusty days;\n",
- "To say, within thine own deep sunken eyes,\n",
- "Were an all-eating\n"
- ]
- }
- ],
- "source": [
- "with open(\"shakespeare.txt\") as f:\n",
- "    text = f.read()\n",
- "print(text[:1000])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- },
- "varInspector": {
- "cols": {
- "lenName": 16,
- "lenType": 16,
- "lenVar": 40
- },
- "kernels_config": {
- "python": {
- "delete_cmd_postfix": "",
- "delete_cmd_prefix": "del ",
- "library": "var_list.py",
- "varRefreshCmd": "print(var_dic_list())"
- },
- "r": {
- "delete_cmd_postfix": ") ",
- "delete_cmd_prefix": "rm(",
- "library": "var_list.r",
- "varRefreshCmd": "cat(var_dic_list()) "
- }
- },
- "types_to_exclude": [
- "module",
- "function",
- "builtin_function_or_method",
- "instance",
- "_Feature"
- ],
- "window_display": false
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
testing.sync.py DELETED
@@ -1,23 +0,0 @@
- # ---
- # jupyter:
- # jupytext:
- # text_representation:
- # extension: .py
- # format_name: percent
- # format_version: '1.3'
- # jupytext_version: 1.3.4
- # kernelspec:
- # display_name: Python 3
- # language: python
- # name: python3
- # ---
-
- # %%
- with open("shakespeare.txt") as f:
-     text = f.read()
- print(text[:1000])
-
-
- # %%
- chars = sorted(set(text))
- print(chars)