dedebo commited on
Commit
f3d3ebc
·
verified ·
1 Parent(s): 7537b61

Upload 4 files

Browse files
Files changed (4) hide show
  1. 00-Installations.ipynb +84 -0
  2. 01-EDA.ipynb +0 -0
  3. 02-Work Embeddings.ipynb +486 -0
  4. utils.py +71 -0
00-Installations.ipynb ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "9ae531c8-ea80-49d1-9513-4d4e926b7e7b",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "!pip install easydict"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": null,
16
+ "id": "3836f6af-c2cc-438e-ba9b-812c9b7b590b",
17
+ "metadata": {},
18
+ "outputs": [],
19
+ "source": [
20
+ "!pip install datasets\n",
21
+ "!pip install pandas\n",
22
+ "!pip install matplotlib\n",
23
+ "!pip install seaborn\n",
24
+ "!pip install pillow\n",
25
+ "!pip install wordcloud\n",
26
+ "!pip install ipywidgets\n",
27
+ "!pip install sentence-transformers\n",
28
+ "!pip install easydict\n",
29
+ "!pip install --force-reinstall -v \"numpy==1.25.2\""
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": null,
35
+ "id": "e1888801-9188-433e-8c23-ae3731901846",
36
+ "metadata": {},
37
+ "outputs": [],
38
+ "source": [
39
+ "#!pip install git+https://github.com/openai/CLIP.git"
40
+ ]
41
+ },
42
+ {
43
+ "cell_type": "code",
44
+ "execution_count": null,
45
+ "id": "8befa941-e491-4bd8-b898-55a2293c5f27",
46
+ "metadata": {},
47
+ "outputs": [],
48
+ "source": [
49
+ "!pip install \"typing-extensions>=4.8.0\""
50
+ ]
51
+ },
52
+ {
53
+ "cell_type": "code",
54
+ "execution_count": null,
55
+ "id": "c37006e8-c0f4-428c-b884-95bfb7280b9a",
56
+ "metadata": {},
57
+ "outputs": [],
58
+ "source": [
59
+ "!pip install tensorflow[and-cuda]"
60
+ ]
61
+ }
62
+ ],
63
+ "metadata": {
64
+ "kernelspec": {
65
+ "display_name": "Python 3 (ipykernel)",
66
+ "language": "python",
67
+ "name": "python3"
68
+ },
69
+ "language_info": {
70
+ "codemirror_mode": {
71
+ "name": "ipython",
72
+ "version": 3
73
+ },
74
+ "file_extension": ".py",
75
+ "mimetype": "text/x-python",
76
+ "name": "python",
77
+ "nbconvert_exporter": "python",
78
+ "pygments_lexer": "ipython3",
79
+ "version": "3.11.9"
80
+ }
81
+ },
82
+ "nbformat": 4,
83
+ "nbformat_minor": 5
84
+ }
01-EDA.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
02-Work Embeddings.ipynb ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "20631e0c-5f53-465b-8d9e-7b8072e26eda",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "C:\\Users\\daparekh\\AppData\\Roaming\\Python\\Python311\\site-packages\\threadpoolctl.py:1214: RuntimeWarning: \n",
14
+ "Found Intel OpenMP ('libiomp') and LLVM OpenMP ('libomp') loaded at\n",
15
+ "the same time. Both libraries are known to be incompatible and this\n",
16
+ "can cause random crashes or deadlocks on Linux when loaded in the\n",
17
+ "same Python program.\n",
18
+ "Using threadpoolctl may cause crashes or deadlocks. For more\n",
19
+ "information and possible workarounds, please see\n",
20
+ " https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md\n",
21
+ "\n",
22
+ " warnings.warn(msg, RuntimeWarning)\n"
23
+ ]
24
+ }
25
+ ],
26
+ "source": [
27
+ "from datasets import load_from_disk\n",
28
+ "from sentence_transformers import SentenceTransformer\n",
29
+ "import numpy as np"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": 2,
35
+ "id": "246a008c-a210-4bd6-99c4-0ada886cb11e",
36
+ "metadata": {},
37
+ "outputs": [
38
+ {
39
+ "data": {
40
+ "text/plain": [
41
+ "DatasetDict({\n",
42
+ " train: Dataset({\n",
43
+ " features: ['image', 'company', 'content', 'description', 'fulltext'],\n",
44
+ " num_rows: 33034\n",
45
+ " })\n",
46
+ " test: Dataset({\n",
47
+ " features: ['image', 'company', 'content', 'description', 'fulltext'],\n",
48
+ " num_rows: 14158\n",
49
+ " })\n",
50
+ "})"
51
+ ]
52
+ },
53
+ "execution_count": 2,
54
+ "metadata": {},
55
+ "output_type": "execute_result"
56
+ }
57
+ ],
58
+ "source": [
59
+ "reloaded_dataset = load_from_disk(\"PreProcessedData\")\n",
60
+ "reloaded_dataset"
61
+ ]
62
+ },
63
+ {
64
+ "cell_type": "code",
65
+ "execution_count": 3,
66
+ "id": "0c4dd5ce-701a-4afc-afdf-93e675147864",
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "from collections import Counter\n",
71
+ "import torch\n",
72
+ "import torch.nn as nn"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 4,
78
+ "id": "ab0deb7d-245d-4620-b9d6-dd71df74600a",
79
+ "metadata": {},
80
+ "outputs": [
81
+ {
82
+ "name": "stdout",
83
+ "output_type": "stream",
84
+ "text": [
85
+ "999296\n",
86
+ "1428686\n"
87
+ ]
88
+ }
89
+ ],
90
+ "source": [
91
+ "merged_sentance = \"\"\n",
92
+ "for data in reloaded_dataset[\"train\"]:\n",
93
+ " merged_sentance = merged_sentance + data[\"company\"]+\" \"+ data[\"content\"]+\" \"+ data[\"description\"]+\" \"\n",
94
+ "print(len(merged_sentance))\n",
95
+ "for data in reloaded_dataset[\"test\"]:\n",
96
+ " merged_sentance = merged_sentance + data[\"company\"]+\" \"+ data[\"content\"]+\" \"+ data[\"description\"]+\" \"\n",
97
+ "print(len(merged_sentance))"
98
+ ]
99
+ },
100
+ {
101
+ "cell_type": "code",
102
+ "execution_count": 5,
103
+ "id": "437fdfb1-c1db-4dff-888d-5c9b1029add7",
104
+ "metadata": {},
105
+ "outputs": [],
106
+ "source": [
107
+ "words = merged_sentance.split(' ')\n",
108
+ " \n",
109
+ "# create a dictionary\n",
110
+ "vocab = Counter(words) \n",
111
+ "vocab = sorted(vocab, key=vocab.get, reverse=True)\n",
112
+ "vocab_size = len(vocab)\n",
113
+ " \n",
114
+ "# create a word to index dictionary from our Vocab dictionary\n",
115
+ "word2idx = {word: ind for ind, word in enumerate(vocab)} \n",
116
+ "idx2word = {ind: word for ind, word in enumerate(vocab)} "
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": 6,
122
+ "id": "862fbc17-ae03-48b5-b1ae-b6ed63eafb22",
123
+ "metadata": {},
124
+ "outputs": [
125
+ {
126
+ "data": {
127
+ "text/plain": [
128
+ "(1790, 1790)"
129
+ ]
130
+ },
131
+ "execution_count": 6,
132
+ "metadata": {},
133
+ "output_type": "execute_result"
134
+ }
135
+ ],
136
+ "source": [
137
+ "len(word2idx),len(idx2word)"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "code",
142
+ "execution_count": 7,
143
+ "id": "008562d4-3789-4d26-8ab8-5d3b980b2438",
144
+ "metadata": {},
145
+ "outputs": [],
146
+ "source": [
147
+ "words = reloaded_dataset[\"train\"][100]['fulltext']\n",
148
+ "words = words.split(' ')"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": 8,
154
+ "id": "c7304275-11c7-4aa9-a929-3b6e9cd31f0a",
155
+ "metadata": {},
156
+ "outputs": [],
157
+ "source": [
158
+ "encoded_sentences = [word2idx[word] for word in words]\n",
159
+ " \n",
160
+ "# assign a value to your embedding_dim\n",
161
+ "e_dim = 1"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 9,
167
+ "id": "303ecce6-8560-44e6-8c62-e316315c3d04",
168
+ "metadata": {},
169
+ "outputs": [
170
+ {
171
+ "data": {
172
+ "text/plain": [
173
+ "[12, 76, 34, 27, 0, 7, 1, 2]"
174
+ ]
175
+ },
176
+ "execution_count": 9,
177
+ "metadata": {},
178
+ "output_type": "execute_result"
179
+ }
180
+ ],
181
+ "source": [
182
+ "encoded_sentences"
183
+ ]
184
+ },
185
+ {
186
+ "cell_type": "code",
187
+ "execution_count": 10,
188
+ "id": "82daf945-1bd5-4d75-be9b-bdfc1e8b1ee8",
189
+ "metadata": {},
190
+ "outputs": [
191
+ {
192
+ "data": {
193
+ "text/plain": [
194
+ "tensor([12, 76, 34, 27, 0, 7, 1, 2])"
195
+ ]
196
+ },
197
+ "execution_count": 10,
198
+ "metadata": {},
199
+ "output_type": "execute_result"
200
+ }
201
+ ],
202
+ "source": [
203
+ "torch.LongTensor(encoded_sentences)"
204
+ ]
205
+ },
206
+ {
207
+ "cell_type": "code",
208
+ "execution_count": 11,
209
+ "id": "24a20907-1403-4677-ab62-725b25f5fa06",
210
+ "metadata": {},
211
+ "outputs": [
212
+ {
213
+ "name": "stdout",
214
+ "output_type": "stream",
215
+ "text": [
216
+ "torch.Size([8, 1])\n"
217
+ ]
218
+ }
219
+ ],
220
+ "source": [
221
+ "# initialise an Embedding layer from Torch\n",
222
+ "emb = nn.Embedding(vocab_size, e_dim, padding_idx = 3)\n",
223
+ "word_vectors = emb(torch.LongTensor(encoded_sentences))\n",
224
+ " \n",
225
+ "#print the word_vectors\n",
226
+ "print(word_vectors.shape)"
227
+ ]
228
+ },
229
+ {
230
+ "cell_type": "code",
231
+ "execution_count": 12,
232
+ "id": "9a5be641-14ee-45fa-8fa6-fd73834ac05d",
233
+ "metadata": {},
234
+ "outputs": [],
235
+ "source": [
236
+ "def get_encoded_sentences(sentance):\n",
237
+ " words = sentance.split(' ')\n",
238
+ " encoded_words = [word2idx[word] for word in words]\n",
239
+ " return encoded_words\n",
240
+ "\n",
241
+ "def get_decoded_sentences(encoded_words):\n",
242
+ " sentance = ' '.join([idx2word[idx] for idx in encoded_words])\n",
243
+ " return sentance"
244
+ ]
245
+ },
246
+ {
247
+ "cell_type": "code",
248
+ "execution_count": 13,
249
+ "id": "df258a3a-0f6b-4f63-bfd3-f60a06f65471",
250
+ "metadata": {},
251
+ "outputs": [
252
+ {
253
+ "data": {
254
+ "text/plain": [
255
+ "'facebook'"
256
+ ]
257
+ },
258
+ "execution_count": 13,
259
+ "metadata": {},
260
+ "output_type": "execute_result"
261
+ }
262
+ ],
263
+ "source": [
264
+ "get_decoded_sentences(get_encoded_sentences(\"facebook\"))"
265
+ ]
266
+ },
267
+ {
268
+ "cell_type": "code",
269
+ "execution_count": 14,
270
+ "id": "4cf66ff1-0d7d-4f1c-b0af-29b0396bf3c8",
271
+ "metadata": {},
272
+ "outputs": [
273
+ {
274
+ "data": {
275
+ "text/plain": [
276
+ "{'image': <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=64x64>,\n",
277
+ " 'company': 'apple',\n",
278
+ " 'content': 'flag',\n",
279
+ " 'description': 'New Zealand',\n",
280
+ " 'fulltext': 'apple flag New Zealand'}"
281
+ ]
282
+ },
283
+ "execution_count": 14,
284
+ "metadata": {},
285
+ "output_type": "execute_result"
286
+ }
287
+ ],
288
+ "source": [
289
+ "reloaded_dataset[\"train\"][2]"
290
+ ]
291
+ },
292
+ {
293
+ "cell_type": "code",
294
+ "execution_count": 15,
295
+ "id": "91ac0e54-90bd-4c20-8cb8-7e333548f279",
296
+ "metadata": {},
297
+ "outputs": [],
298
+ "source": [
299
+ "fulltext_vector = []\n",
300
+ "for data in reloaded_dataset[\"train\"]:\n",
301
+ " #print(data[\"fulltext\"])\n",
302
+ " #print(get_encoded_sentences(data[\"fulltext\"]))\n",
303
+ " encoded_sentences = get_encoded_sentences(data[\"fulltext\"])\n",
304
+ " fulltext_vector.append(np.pad(encoded_sentences, [(0, 100-len(encoded_sentences))], mode='constant', constant_values=0))\n",
305
+ " #print(fulltext_vector)"
306
+ ]
307
+ },
308
+ {
309
+ "cell_type": "code",
310
+ "execution_count": 16,
311
+ "id": "c913b2c3-8ba9-4eae-bcc3-93ef0998f40b",
312
+ "metadata": {},
313
+ "outputs": [
314
+ {
315
+ "data": {
316
+ "text/plain": [
317
+ "DatasetDict({\n",
318
+ " train: Dataset({\n",
319
+ " features: ['image', 'company', 'content', 'description', 'fulltext', 'fulltext_vector'],\n",
320
+ " num_rows: 33034\n",
321
+ " })\n",
322
+ " test: Dataset({\n",
323
+ " features: ['image', 'company', 'content', 'description', 'fulltext'],\n",
324
+ " num_rows: 14158\n",
325
+ " })\n",
326
+ "})"
327
+ ]
328
+ },
329
+ "execution_count": 16,
330
+ "metadata": {},
331
+ "output_type": "execute_result"
332
+ }
333
+ ],
334
+ "source": [
335
+ "reloaded_dataset[\"train\"]=reloaded_dataset[\"train\"].add_column(\"fulltext_vector\", fulltext_vector)\n",
336
+ "reloaded_dataset"
337
+ ]
338
+ },
339
+ {
340
+ "cell_type": "code",
341
+ "execution_count": 17,
342
+ "id": "acdbc88f-1cba-4c75-86fd-db9f59454a50",
343
+ "metadata": {},
344
+ "outputs": [],
345
+ "source": [
346
+ "fulltext_vector = []\n",
347
+ "for data in reloaded_dataset[\"test\"]:\n",
348
+ " #print(data[\"fulltext\"])\n",
349
+ " #print(get_encoded_sentences(data[\"fulltext\"]))\n",
350
+ " encoded_sentences = get_encoded_sentences(data[\"fulltext\"])\n",
351
+ " fulltext_vector.append(np.pad(encoded_sentences, [(0, 100-len(encoded_sentences))], mode='constant', constant_values=0))\n",
352
+ " #print(fulltext_vector)"
353
+ ]
354
+ },
355
+ {
356
+ "cell_type": "code",
357
+ "execution_count": 18,
358
+ "id": "e51f138c-117c-4f79-8260-de1d56512d07",
359
+ "metadata": {},
360
+ "outputs": [
361
+ {
362
+ "data": {
363
+ "text/plain": [
364
+ "DatasetDict({\n",
365
+ " train: Dataset({\n",
366
+ " features: ['image', 'company', 'content', 'description', 'fulltext', 'fulltext_vector'],\n",
367
+ " num_rows: 33034\n",
368
+ " })\n",
369
+ " test: Dataset({\n",
370
+ " features: ['image', 'company', 'content', 'description', 'fulltext', 'fulltext_vector'],\n",
371
+ " num_rows: 14158\n",
372
+ " })\n",
373
+ "})"
374
+ ]
375
+ },
376
+ "execution_count": 18,
377
+ "metadata": {},
378
+ "output_type": "execute_result"
379
+ }
380
+ ],
381
+ "source": [
382
+ "reloaded_dataset[\"test\"]=reloaded_dataset[\"test\"].add_column(\"fulltext_vector\", fulltext_vector)\n",
383
+ "reloaded_dataset"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": 19,
389
+ "id": "a7c46e54-87c7-40c4-836c-b72dfbaea28e",
390
+ "metadata": {},
391
+ "outputs": [
392
+ {
393
+ "data": {
394
+ "application/vnd.jupyter.widget-view+json": {
395
+ "model_id": "93f1b3dc347847e89f0e18d9796fc2c9",
396
+ "version_major": 2,
397
+ "version_minor": 0
398
+ },
399
+ "text/plain": [
400
+ "Saving the dataset (0/1 shards): 0%| | 0/33034 [00:00<?, ? examples/s]"
401
+ ]
402
+ },
403
+ "metadata": {},
404
+ "output_type": "display_data"
405
+ },
406
+ {
407
+ "data": {
408
+ "application/vnd.jupyter.widget-view+json": {
409
+ "model_id": "36765e95e88043a6aee8647b21900042",
410
+ "version_major": 2,
411
+ "version_minor": 0
412
+ },
413
+ "text/plain": [
414
+ "Saving the dataset (0/1 shards): 0%| | 0/14158 [00:00<?, ? examples/s]"
415
+ ]
416
+ },
417
+ "metadata": {},
418
+ "output_type": "display_data"
419
+ }
420
+ ],
421
+ "source": [
422
+ "reloaded_dataset.save_to_disk(\"PreProcessedDataWithEmb\")"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "code",
427
+ "execution_count": 20,
428
+ "id": "05367cd9-0afc-40cc-8dc7-2b9689eb1506",
429
+ "metadata": {},
430
+ "outputs": [
431
+ {
432
+ "data": {
433
+ "text/plain": [
434
+ "DatasetDict({\n",
435
+ " train: Dataset({\n",
436
+ " features: ['image', 'company', 'content', 'description', 'fulltext', 'fulltext_vector'],\n",
437
+ " num_rows: 33034\n",
438
+ " })\n",
439
+ " test: Dataset({\n",
440
+ " features: ['image', 'company', 'content', 'description', 'fulltext', 'fulltext_vector'],\n",
441
+ " num_rows: 14158\n",
442
+ " })\n",
443
+ "})"
444
+ ]
445
+ },
446
+ "execution_count": 20,
447
+ "metadata": {},
448
+ "output_type": "execute_result"
449
+ }
450
+ ],
451
+ "source": [
452
+ "reloaded_dataset = load_from_disk(\"PreProcessedDataWithEmb\")\n",
453
+ "reloaded_dataset"
454
+ ]
455
+ },
456
+ {
457
+ "cell_type": "code",
458
+ "execution_count": null,
459
+ "id": "c3f8dfad-4dce-468d-bcbd-75743a6556f3",
460
+ "metadata": {},
461
+ "outputs": [],
462
+ "source": []
463
+ }
464
+ ],
465
+ "metadata": {
466
+ "kernelspec": {
467
+ "display_name": "Python 3 (ipykernel)",
468
+ "language": "python",
469
+ "name": "python3"
470
+ },
471
+ "language_info": {
472
+ "codemirror_mode": {
473
+ "name": "ipython",
474
+ "version": 3
475
+ },
476
+ "file_extension": ".py",
477
+ "mimetype": "text/x-python",
478
+ "name": "python",
479
+ "nbconvert_exporter": "python",
480
+ "pygments_lexer": "ipython3",
481
+ "version": "3.11.9"
482
+ }
483
+ },
484
+ "nbformat": 4,
485
+ "nbformat_minor": 5
486
+ }
utils.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # coding: utf-8
3
+
4
+ # In[2]:
5
+
6
+
7
+ # import nbimporter
8
+
9
+
10
+ # In[3]:
11
+
12
+
13
+ import numpy as np
14
+ from torch import nn
15
+ from torch import autograd
16
+ import torch
17
+ import os
18
+ import pdb
19
+
20
+
21
+ # In[ ]:
22
+
23
+
24
class Concat_embed(nn.Module):
    """Fuse a text embedding into a 4x4 convolutional feature map.

    The raw embedding is compressed through a Linear -> BatchNorm1d ->
    LeakyReLU projection, tiled across the fixed 4x4 spatial grid, and
    concatenated to the input feature map along the channel dimension.
    """

    def __init__(self, embed_dim, projected_embed_dim):
        super(Concat_embed, self).__init__()
        # Small projection head that maps the embedding down to
        # projected_embed_dim channels before spatial replication.
        self.projection = nn.Sequential(
            nn.Linear(in_features=embed_dim, out_features=projected_embed_dim),
            nn.BatchNorm1d(num_features=projected_embed_dim),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, inp, embed):
        """Return inp with the projected, tiled embedding appended on dim 1.

        inp:   (B, C, 4, 4) feature map — the 4x4 grid size is hard-coded.
        embed: (B, embed_dim) text embedding.
        """
        compressed = self.projection(embed)
        # (B, P) -> (4, 4, B, P) -> (B, P, 4, 4): copy the projected vector
        # into every cell of the 4x4 grid.
        tiled = compressed.repeat(4, 4, 1, 1).permute(2, 3, 0, 1)
        return torch.cat([inp, tiled], 1)
40
+
41
class Utils(object):
    """Stateless helpers shared by the GAN training loop."""

    @staticmethod
    def smooth_label(tensor, offset):
        """Return *tensor* shifted by *offset* (one-sided label smoothing)."""
        return tensor + offset

    @staticmethod
    def save_checkpoint(netD, netG, dir_path, subdir_path, epoch):
        """Save discriminator and generator state dicts for *epoch*.

        Files are written as disc_<epoch>.pth and gen_<epoch>.pth inside
        dir_path/subdir_path, creating the directory if needed.
        """
        path = os.path.join(dir_path, subdir_path)
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(path, exist_ok=True)

        torch.save(netD.state_dict(), '{0}/disc_{1}.pth'.format(path, epoch))
        torch.save(netG.state_dict(), '{0}/gen_{1}.pth'.format(path, epoch))

    @staticmethod
    def weights_init(m):
        """DCGAN-style initializer, applied via model.apply(Utils.weights_init).

        Conv* layers get weights ~ N(0, 0.02); BatchNorm* layers get
        weights ~ N(1, 0.02) and zero bias. Other modules are untouched.
        """
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
64
+
65
+
66
class Logger(object):
    """Console logger for GAN training progress."""

    def log_iteration_gan(self, epoch, iteration, d_loss, g_loss, real_score, fake_score):
        """Print a one-line summary of a single training iteration.

        Loss/score arguments are tensors; each is moved to CPU and reduced
        to its mean before formatting.
        """
        d_mean = d_loss.data.cpu().mean()
        g_mean = g_loss.data.cpu().mean()
        real_mean = real_score.data.cpu().mean()
        fake_mean = fake_score.data.cpu().mean()
        print("Epoch: %d, Iter: %d, d_loss= %f, g_loss= %f, D(X)= %f, D(G(X))= %f" % (
            epoch, iteration, d_mean, g_mean, real_mean, fake_mean))