Francis2003 commited on
Commit
94f5c25
·
verified ·
1 Parent(s): ec151e6

Upload assets/code/FrancisGross_screenplay_pred_nom.ipynb with huggingface_hub

Browse files
assets/code/FrancisGross_screenplay_pred_nom.ipynb ADDED
@@ -0,0 +1,1398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "fafe2f02-0f95-4f44-b272-59466084cfe4",
6
+ "metadata": {},
7
+ "source": [
8
+ "**Predicting Oscar-Nominated Screenplays with Sentence Embeddings**\n",
9
+ "\n",
10
+ "Francis Gross \n",
11
+ "Faculty of Informatics and Data Science \n",
12
+ "University of Regensburg \n",
13
+ "93053 Regensburg, Germany \n",
14
+ "francis.gross@stud.uni-regensburg.de"
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": 78,
20
+ "id": "c2b3edc6-d38a-4f38-9862-fdee52e95f5a",
21
+ "metadata": {},
22
+ "outputs": [],
23
+ "source": [
24
+ "# import:\n",
25
+ "\n",
26
+ "import pandas as pd\n",
27
+ "import re, numpy as np\n",
28
+ "import unicodedata\n",
29
+ "import joblib\n",
30
+ "import torch\n",
31
+ "\n",
32
+ "from pathlib import Path\n",
33
+ "from datasets import load_dataset\n",
34
+ "from sklearn.model_selection import train_test_split\n",
35
+ "from tqdm import tqdm\n",
36
+ "from sentence_transformers import SentenceTransformer\n",
37
+ "from sklearn.linear_model import LogisticRegression\n",
38
+ "from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, average_precision_score"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": 79,
44
+ "id": "063b63d8-0d34-459f-8f60-b678edcc02ca",
45
+ "metadata": {},
46
+ "outputs": [
47
+ {
48
+ "name": "stderr",
49
+ "output_type": "stream",
50
+ "text": [
51
+ "Repo card metadata block was not found. Setting CardData to empty.\n"
52
+ ]
53
+ },
54
+ {
55
+ "data": {
56
+ "text/html": [
57
+ "<div>\n",
58
+ "<style scoped>\n",
59
+ " .dataframe tbody tr th:only-of-type {\n",
60
+ " vertical-align: middle;\n",
61
+ " }\n",
62
+ "\n",
63
+ " .dataframe tbody tr th {\n",
64
+ " vertical-align: top;\n",
65
+ " }\n",
66
+ "\n",
67
+ " .dataframe thead th {\n",
68
+ " text-align: right;\n",
69
+ " }\n",
70
+ "</style>\n",
71
+ "<table border=\"1\" class=\"dataframe\">\n",
72
+ " <thead>\n",
73
+ " <tr style=\"text-align: right;\">\n",
74
+ " <th></th>\n",
75
+ " <th>movie_name</th>\n",
76
+ " <th>imdb_id</th>\n",
77
+ " <th>script</th>\n",
78
+ " <th>summary</th>\n",
79
+ " <th>title</th>\n",
80
+ " <th>year</th>\n",
81
+ " <th>script_plain</th>\n",
82
+ " <th>script_clean</th>\n",
83
+ " </tr>\n",
84
+ " </thead>\n",
85
+ " <tbody>\n",
86
+ " <tr>\n",
87
+ " <th>0</th>\n",
88
+ " <td>8MM_1999</td>\n",
89
+ " <td>tt0134273</td>\n",
90
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
91
+ " <td>Private investigator Tom Welles is contacted b...</td>\n",
92
+ " <td>8MM</td>\n",
93
+ " <td>1999</td>\n",
94
+ " <td>\\n \\n INT. MIAMI AIRPORT, TERMINAL -- ...</td>\n",
95
+ " <td>INT. MIAMI AIRPORT, TERMINAL - DAY\\nAmongst th...</td>\n",
96
+ " </tr>\n",
97
+ " <tr>\n",
98
+ " <th>1</th>\n",
99
+ " <td>The Iron Lady_2011</td>\n",
100
+ " <td>tt1007029</td>\n",
101
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
102
+ " <td>In flashbacks, the audience is shown a young M...</td>\n",
103
+ " <td>The Iron Lady</td>\n",
104
+ " <td>2011</td>\n",
105
+ " <td>\\n \\n INT. SHOP. NR CHESTER SQUARE. LON...</td>\n",
106
+ " <td>INT. SHOP. NR CHESTER SQUARE. LONDON. PRESENT....</td>\n",
107
+ " </tr>\n",
108
+ " <tr>\n",
109
+ " <th>2</th>\n",
110
+ " <td>Adventureland_2009</td>\n",
111
+ " <td>tt1091722</td>\n",
112
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;scene_description&gt;Ad...</td>\n",
113
+ " <td>In 1987, James Brennan plans to have a summer ...</td>\n",
114
+ " <td>Adventureland</td>\n",
115
+ " <td>2009</td>\n",
116
+ " <td>\\n \\n AdVeNtUrElAnD by Greg Mottola rev...</td>\n",
117
+ " <td>AdVeNtUrElAnD by Greg Mottola revised August 5...</td>\n",
118
+ " </tr>\n",
119
+ " <tr>\n",
120
+ " <th>3</th>\n",
121
+ " <td>Napoleon_2023</td>\n",
122
+ " <td>tt13287846</td>\n",
123
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;NAPOLEON&lt;/...</td>\n",
124
+ " <td>In 1793, amid the French Revolution, young arm...</td>\n",
125
+ " <td>Napoleon</td>\n",
126
+ " <td>2023</td>\n",
127
+ " <td>\\n \\n NAPOLEON \\n By \\n David S...</td>\n",
128
+ " <td>NAPOLEON\\nBy\\nDavid Scarpa\\n\\nINT. TUILERIES -...</td>\n",
129
+ " </tr>\n",
130
+ " <tr>\n",
131
+ " <th>4</th>\n",
132
+ " <td>Kubo and the Two Strings_2016</td>\n",
133
+ " <td>tt4302938</td>\n",
134
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;KUBO&lt;/char...</td>\n",
135
+ " <td>In feudal Japan, a 12-year-old boy with only o...</td>\n",
136
+ " <td>Kubo and the Two Strings</td>\n",
137
+ " <td>2016</td>\n",
138
+ " <td>\\n \\n KUBO \\n ... AND THE TWO STRIN...</td>\n",
139
+ " <td>KUBO\\n... AND THE TWO STRINGS\\nScreenplay by M...</td>\n",
140
+ " </tr>\n",
141
+ " <tr>\n",
142
+ " <th>...</th>\n",
143
+ " <td>...</td>\n",
144
+ " <td>...</td>\n",
145
+ " <td>...</td>\n",
146
+ " <td>...</td>\n",
147
+ " <td>...</td>\n",
148
+ " <td>...</td>\n",
149
+ " <td>...</td>\n",
150
+ " <td>...</td>\n",
151
+ " </tr>\n",
152
+ " <tr>\n",
153
+ " <th>2195</th>\n",
154
+ " <td>Terminator Salvation_2009</td>\n",
155
+ " <td>tt0438488</td>\n",
156
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
157
+ " <td>In 2003, Dr. Serena Kogan of Cyberdyne Systems...</td>\n",
158
+ " <td>Terminator Salvation</td>\n",
159
+ " <td>2009</td>\n",
160
+ " <td>\\n \\n INT. DEATH ROW/CELL - DAWN \\n ...</td>\n",
161
+ " <td>INT. DEATH ROW/CELL - DAWN\\nLONGVIEW STATE COR...</td>\n",
162
+ " </tr>\n",
163
+ " <tr>\n",
164
+ " <th>2196</th>\n",
165
+ " <td>Collateral Damage_2002</td>\n",
166
+ " <td>tt0233469</td>\n",
167
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
168
+ " <td>A bomb detonates in the plaza of the Colombian...</td>\n",
169
+ " <td>Collateral Damage</td>\n",
170
+ " <td>2002</td>\n",
171
+ " <td>\\n \\n INT./EXT. NYFD FIREHOUSE - VARIOU...</td>\n",
172
+ " <td>INT./EXT. NYFD FIREHOUSE - VARIOUS SHOTS - NIG...</td>\n",
173
+ " </tr>\n",
174
+ " <tr>\n",
175
+ " <th>2197</th>\n",
176
+ " <td>Wonder Woman_2017</td>\n",
177
+ " <td>tt0451279</td>\n",
178
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;scene_description&gt;FA...</td>\n",
179
+ " <td>In present-day Paris, Diana Prince receives a ...</td>\n",
180
+ " <td>Wonder Woman</td>\n",
181
+ " <td>2017</td>\n",
182
+ " <td>\\n \\n FADE IN : \\n EXT. THE EARTH (...</td>\n",
183
+ " <td>FADE IN :\\nEXT. THE EARTH (FROM SPACE)\\nThe EA...</td>\n",
184
+ " </tr>\n",
185
+ " <tr>\n",
186
+ " <th>2198</th>\n",
187
+ " <td>Flatliners_2017</td>\n",
188
+ " <td>tt2039338</td>\n",
189
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;FLATLINERS...</td>\n",
190
+ " <td>Courtney is a medical student who is obsessed ...</td>\n",
191
+ " <td>Flatliners</td>\n",
192
+ " <td>2017</td>\n",
193
+ " <td>\\n \\n FLATLINERS \\n By \\n Ben R...</td>\n",
194
+ " <td>FLATLINERS\\nBy\\nBen Ripley Based on a screenpl...</td>\n",
195
+ " </tr>\n",
196
+ " <tr>\n",
197
+ " <th>2199</th>\n",
198
+ " <td>Vacancy_2007</td>\n",
199
+ " <td>tt0452702</td>\n",
200
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;VACANCY&lt;/c...</td>\n",
201
+ " <td>On their way home from a family party, David a...</td>\n",
202
+ " <td>Vacancy</td>\n",
203
+ " <td>2007</td>\n",
204
+ " <td>\\n \\n VACANCY \\n by \\n Mark L S...</td>\n",
205
+ " <td>VACANCY\\nby\\nMark L Smith TheHalLieberman Comp...</td>\n",
206
+ " </tr>\n",
207
+ " </tbody>\n",
208
+ "</table>\n",
209
+ "<p>2200 rows × 8 columns</p>\n",
210
+ "</div>"
211
+ ],
212
+ "text/plain": [
213
+ " movie_name imdb_id \\\n",
214
+ "0 8MM_1999 tt0134273 \n",
215
+ "1 The Iron Lady_2011 tt1007029 \n",
216
+ "2 Adventureland_2009 tt1091722 \n",
217
+ "3 Napoleon_2023 tt13287846 \n",
218
+ "4 Kubo and the Two Strings_2016 tt4302938 \n",
219
+ "... ... ... \n",
220
+ "2195 Terminator Salvation_2009 tt0438488 \n",
221
+ "2196 Collateral Damage_2002 tt0233469 \n",
222
+ "2197 Wonder Woman_2017 tt0451279 \n",
223
+ "2198 Flatliners_2017 tt2039338 \n",
224
+ "2199 Vacancy_2007 tt0452702 \n",
225
+ "\n",
226
+ " script \\\n",
227
+ "0 <script>\\n <scene>\\n <stage_direction>INT.... \n",
228
+ "1 <script>\\n <scene>\\n <stage_direction>INT.... \n",
229
+ "2 <script>\\n <scene>\\n <scene_description>Ad... \n",
230
+ "3 <script>\\n <scene>\\n <character>NAPOLEON</... \n",
231
+ "4 <script>\\n <scene>\\n <character>KUBO</char... \n",
232
+ "... ... \n",
233
+ "2195 <script>\\n <scene>\\n <stage_direction>INT.... \n",
234
+ "2196 <script>\\n <scene>\\n <stage_direction>INT.... \n",
235
+ "2197 <script>\\n <scene>\\n <scene_description>FA... \n",
236
+ "2198 <script>\\n <scene>\\n <character>FLATLINERS... \n",
237
+ "2199 <script>\\n <scene>\\n <character>VACANCY</c... \n",
238
+ "\n",
239
+ " summary \\\n",
240
+ "0 Private investigator Tom Welles is contacted b... \n",
241
+ "1 In flashbacks, the audience is shown a young M... \n",
242
+ "2 In 1987, James Brennan plans to have a summer ... \n",
243
+ "3 In 1793, amid the French Revolution, young arm... \n",
244
+ "4 In feudal Japan, a 12-year-old boy with only o... \n",
245
+ "... ... \n",
246
+ "2195 In 2003, Dr. Serena Kogan of Cyberdyne Systems... \n",
247
+ "2196 A bomb detonates in the plaza of the Colombian... \n",
248
+ "2197 In present-day Paris, Diana Prince receives a ... \n",
249
+ "2198 Courtney is a medical student who is obsessed ... \n",
250
+ "2199 On their way home from a family party, David a... \n",
251
+ "\n",
252
+ " title year \\\n",
253
+ "0 8MM 1999 \n",
254
+ "1 The Iron Lady 2011 \n",
255
+ "2 Adventureland 2009 \n",
256
+ "3 Napoleon 2023 \n",
257
+ "4 Kubo and the Two Strings 2016 \n",
258
+ "... ... ... \n",
259
+ "2195 Terminator Salvation 2009 \n",
260
+ "2196 Collateral Damage 2002 \n",
261
+ "2197 Wonder Woman 2017 \n",
262
+ "2198 Flatliners 2017 \n",
263
+ "2199 Vacancy 2007 \n",
264
+ "\n",
265
+ " script_plain \\\n",
266
+ "0 \\n \\n INT. MIAMI AIRPORT, TERMINAL -- ... \n",
267
+ "1 \\n \\n INT. SHOP. NR CHESTER SQUARE. LON... \n",
268
+ "2 \\n \\n AdVeNtUrElAnD by Greg Mottola rev... \n",
269
+ "3 \\n \\n NAPOLEON \\n By \\n David S... \n",
270
+ "4 \\n \\n KUBO \\n ... AND THE TWO STRIN... \n",
271
+ "... ... \n",
272
+ "2195 \\n \\n INT. DEATH ROW/CELL - DAWN \\n ... \n",
273
+ "2196 \\n \\n INT./EXT. NYFD FIREHOUSE - VARIOU... \n",
274
+ "2197 \\n \\n FADE IN : \\n EXT. THE EARTH (... \n",
275
+ "2198 \\n \\n FLATLINERS \\n By \\n Ben R... \n",
276
+ "2199 \\n \\n VACANCY \\n by \\n Mark L S... \n",
277
+ "\n",
278
+ " script_clean \n",
279
+ "0 INT. MIAMI AIRPORT, TERMINAL - DAY\\nAmongst th... \n",
280
+ "1 INT. SHOP. NR CHESTER SQUARE. LONDON. PRESENT.... \n",
281
+ "2 AdVeNtUrElAnD by Greg Mottola revised August 5... \n",
282
+ "3 NAPOLEON\\nBy\\nDavid Scarpa\\n\\nINT. TUILERIES -... \n",
283
+ "4 KUBO\\n... AND THE TWO STRINGS\\nScreenplay by M... \n",
284
+ "... ... \n",
285
+ "2195 INT. DEATH ROW/CELL - DAWN\\nLONGVIEW STATE COR... \n",
286
+ "2196 INT./EXT. NYFD FIREHOUSE - VARIOUS SHOTS - NIG... \n",
287
+ "2197 FADE IN :\\nEXT. THE EARTH (FROM SPACE)\\nThe EA... \n",
288
+ "2198 FLATLINERS\\nBy\\nBen Ripley Based on a screenpl... \n",
289
+ "2199 VACANCY\\nby\\nMark L Smith TheHalLieberman Comp... \n",
290
+ "\n",
291
+ "[2200 rows x 8 columns]"
292
+ ]
293
+ },
294
+ "metadata": {},
295
+ "output_type": "display_data"
296
+ }
297
+ ],
298
+ "source": [
299
+ "# loading screeplay data from hugging-face: MovieSum\n",
300
+ "\n",
301
+ "movie_sum_ds = load_dataset(\"rohitsaxena/MovieSum\")\n",
302
+ "\n",
303
+ "# to pandas\n",
304
+ "df_train = movie_sum_ds[\"train\"].to_pandas()\n",
305
+ "df_val = movie_sum_ds[\"validation\"].to_pandas()\n",
306
+ "df_test = movie_sum_ds[\"test\"].to_pandas()\n",
307
+ "\n",
308
+ "#display(df_train)\n",
309
+ "#display(df_val)\n",
310
+ "#display(df_test)\n",
311
+ "\n",
312
+ "# concatinate all to one file for merging with nomination data\n",
313
+ "movie_sum_df = pd.concat([df_train, df_val, df_test], ignore_index=True)\n",
314
+ "\n",
315
+ "\n",
316
+ "# preparing and cleaning data for sentence embeddings\n",
317
+ "\n",
318
+ "# extract title and year from movie_name\n",
319
+ "movie_sum_df[[\"title\", \"year\"]] = movie_sum_df[\"movie_name\"].str.extract(r\"^(.*)_(\\d{4})$\")\n",
320
+ "movie_sum_df[\"year\"] = movie_sum_df[\"year\"].astype(float).astype(\"Int64\")\n",
321
+ "\n",
322
+ "#print(movie_sum_labeled.loc[0, 'script'])\n",
323
+ "# xml-tags in script, we need script_plain without tags\n",
324
+ "\"\"\"\n",
325
+ "<script>\n",
326
+ "<scene>\n",
327
+ "<stage_direction>..</stage_direction>\n",
328
+ "<scene_description>...</scene_description>\n",
329
+ "<character>..</character>\n",
330
+ "<dialogue>..</dialogue>\n",
331
+ "...\n",
332
+ "</scene>\n",
333
+ "<scene>\n",
334
+ "...\n",
335
+ "</scene>\n",
336
+ "<script>\n",
337
+ "\"\"\"\n",
338
+ "def strip_xml_tags(text):\n",
339
+ " if text is None:\n",
340
+ " return \"\"\n",
341
+ " return re.sub(r\"<[^>]+>\", \" \", str(text))\n",
342
+ "\n",
343
+ "movie_sum_df[\"script_plain\"] = movie_sum_df[\"script\"].apply(strip_xml_tags)\n",
344
+ "\n",
345
+ "# just to be safe, we clean script_plain by standards\n",
346
+ "\n",
347
+ "# print(movie_sum_df.loc[0, 'script_plain'])\n",
348
+ "\n",
349
+ "# regex\n",
350
+ "\n",
351
+ "_REMOVE_LINE = re.compile(\n",
352
+ " r\"\"\"\n",
353
+ " ^\\s*( # ganze Zeile prüfen\n",
354
+ " page\\s+\\d+(\\s+of\\s+\\d+)? | # \"Page 12\" / \"Page 12 of 120\"\n",
355
+ " scanned\\s+by.* | # Scans/Wasserzeichen\n",
356
+ " downloaded\\s+by.* |\n",
357
+ " transcribed\\s+by.* |\n",
358
+ " copyright.* |\n",
359
+ " all\\s+rights\\s+reserved.* |\n",
360
+ " [-=_]{3,} # Linien aus --- === ___\n",
361
+ " )\\s*$\"\"\",\n",
362
+ " re.IGNORECASE | re.VERBOSE\n",
363
+ ")\n",
364
+ "\n",
365
+ "_REMOVE_TRANSITION = re.compile(\n",
366
+ " r\"^\\s*(CUT TO:|DISSOLVE TO:|SMASH CUT TO:|MATCH CUT TO:|FADE IN:|FADE OUT:)\\s*$\",\n",
367
+ " re.IGNORECASE\n",
368
+ ")\n",
369
+ "\n",
370
+ "_TIME_WORDS = r\"(DAY|NIGHT|EVENING|MORNING|AFTERNOON|DAWN|DUSK|LATER|CONTINUOUS|SAME)\"\n",
371
+ "_TIME_AT_END = re.compile(rf\"\\s*[-–—]{{1,2}}\\s*{_TIME_WORDS}\\s*$\", re.IGNORECASE)\n",
372
+ "_TIME_ONLY = re.compile(rf\"^\\s*[-–—]{{1,2}}\\s*{_TIME_WORDS}\\s*$\", re.IGNORECASE)\n",
373
+ "\n",
374
+ "\n",
375
+ "\n",
376
+ "def normalize_unicode(text):\n",
377
+ " text = unicodedata.normalize(\"NFC\", text)\n",
378
+ " text = text.replace(\"\\r\\n\", \"\\n\").replace(\"\\r\", \"\\n\").replace(\"\\t\", \" \")\n",
379
+ " text = (text\n",
380
+ " .replace(\"\\u00A0\", \" \") \n",
381
+ " .replace(\"\\u2018\", \"'\").replace(\"\\u2019\", \"'\") # ‘ ’ -> '\n",
382
+ " .replace(\"\\u201C\", '\"').replace(\"\\u201D\", '\"') # “ ” -> \"\n",
383
+ " .replace(\"\\u2013\", \"-\").replace(\"\\u2014\", \"-\") # – — -> -\n",
384
+ " )\n",
385
+ " return text\n",
386
+ "\n",
387
+ "\n",
388
+ "def clean_lines(text):\n",
389
+ " out_lines = []\n",
390
+ " for raw in text.split(\"\\n\"):\n",
391
+ " line = raw.strip()\n",
392
+ " if not line:\n",
393
+ " out_lines.append(\"\") # keep structure\n",
394
+ " continue\n",
395
+ " if _REMOVE_LINE.match(line) or _REMOVE_TRANSITION.match(line):\n",
396
+ " continue\n",
397
+ " # reduce multiple spaces within the line\n",
398
+ " line = re.sub(r\"[ \\u00A0]{2,}\", \" \", line)\n",
399
+ " # normalize sluglines:\n",
400
+ " # \"INT. OFFICE -- DAY\" -> \"INT. OFFICE - DAY\"\n",
401
+ " # \"--NIGHT\" -> \"NIGHT\"\n",
402
+ " # \"EXT. STREET – evening\"-> \"EXT. STREET - EVENING\"\n",
403
+ " line = _TIME_AT_END.sub(lambda m: f\" - {m.group(1).upper()}\", line)\n",
404
+ " line = _TIME_ONLY.sub(lambda m: m.group(1).upper(), line)\n",
405
+ " out_lines.append(line)\n",
406
+ " # reduce multiple blank lines to max 1\n",
407
+ " cleaned = \"\\n\".join(out_lines)\n",
408
+ " cleaned = re.sub(r\"\\n{3,}\", \"\\n\\n\", cleaned).strip()\n",
409
+ " return cleaned\n",
410
+ "\n",
411
+ "\n",
412
+ "\n",
413
+ "def clean_script(text):\n",
414
+ " text = normalize_unicode(text)\n",
415
+ " text = clean_lines(text)\n",
416
+ " return text\n",
417
+ "\n",
418
+ "movie_sum_df[\"script_clean\"] = movie_sum_df[\"script_plain\"].apply(clean_script)\n",
419
+ "\n",
420
+ "\n",
421
+ "# just have a look at data\n",
422
+ "display(movie_sum_df)\n"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "code",
427
+ "execution_count": 39,
428
+ "id": "456f0c8f-479d-4631-a074-22c236c89565",
429
+ "metadata": {},
430
+ "outputs": [
431
+ {
432
+ "name": "stdout",
433
+ "output_type": "stream",
434
+ "text": [
435
+ " imdb_id movie_name nominated winner\n",
436
+ "0 tt0134273 8MM_1999 0 0\n",
437
+ "1 tt1007029 The Iron Lady_2011 0 0\n",
438
+ "2 tt1091722 Adventureland_2009 0 0\n",
439
+ "3 tt13287846 Napoleon_2023 0 0\n",
440
+ "4 tt4302938 Kubo and the Two Strings_2016 1 0\n",
441
+ "Gesamt: 2200 | nominated=1: 417 | winner=1: 0\n",
442
+ "\n",
443
+ "\n",
444
+ "\n"
445
+ ]
446
+ },
447
+ {
448
+ "data": {
449
+ "text/html": [
450
+ "<div>\n",
451
+ "<style scoped>\n",
452
+ " .dataframe tbody tr th:only-of-type {\n",
453
+ " vertical-align: middle;\n",
454
+ " }\n",
455
+ "\n",
456
+ " .dataframe tbody tr th {\n",
457
+ " vertical-align: top;\n",
458
+ " }\n",
459
+ "\n",
460
+ " .dataframe thead th {\n",
461
+ " text-align: right;\n",
462
+ " }\n",
463
+ "</style>\n",
464
+ "<table border=\"1\" class=\"dataframe\">\n",
465
+ " <thead>\n",
466
+ " <tr style=\"text-align: right;\">\n",
467
+ " <th></th>\n",
468
+ " <th>movie_name</th>\n",
469
+ " <th>imdb_id</th>\n",
470
+ " <th>script</th>\n",
471
+ " <th>summary</th>\n",
472
+ " <th>title</th>\n",
473
+ " <th>year</th>\n",
474
+ " <th>script_plain</th>\n",
475
+ " <th>script_clean</th>\n",
476
+ " <th>nominated</th>\n",
477
+ " <th>winner</th>\n",
478
+ " </tr>\n",
479
+ " </thead>\n",
480
+ " <tbody>\n",
481
+ " <tr>\n",
482
+ " <th>0</th>\n",
483
+ " <td>8MM_1999</td>\n",
484
+ " <td>tt0134273</td>\n",
485
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
486
+ " <td>Private investigator Tom Welles is contacted b...</td>\n",
487
+ " <td>8MM</td>\n",
488
+ " <td>1999</td>\n",
489
+ " <td>\\n \\n INT. MIAMI AIRPORT, TERMINAL -- ...</td>\n",
490
+ " <td>INT. MIAMI AIRPORT, TERMINAL - DAY\\nAmongst th...</td>\n",
491
+ " <td>0</td>\n",
492
+ " <td>0</td>\n",
493
+ " </tr>\n",
494
+ " <tr>\n",
495
+ " <th>1</th>\n",
496
+ " <td>The Iron Lady_2011</td>\n",
497
+ " <td>tt1007029</td>\n",
498
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
499
+ " <td>In flashbacks, the audience is shown a young M...</td>\n",
500
+ " <td>The Iron Lady</td>\n",
501
+ " <td>2011</td>\n",
502
+ " <td>\\n \\n INT. SHOP. NR CHESTER SQUARE. LON...</td>\n",
503
+ " <td>INT. SHOP. NR CHESTER SQUARE. LONDON. PRESENT....</td>\n",
504
+ " <td>0</td>\n",
505
+ " <td>0</td>\n",
506
+ " </tr>\n",
507
+ " <tr>\n",
508
+ " <th>2</th>\n",
509
+ " <td>Adventureland_2009</td>\n",
510
+ " <td>tt1091722</td>\n",
511
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;scene_description&gt;Ad...</td>\n",
512
+ " <td>In 1987, James Brennan plans to have a summer ...</td>\n",
513
+ " <td>Adventureland</td>\n",
514
+ " <td>2009</td>\n",
515
+ " <td>\\n \\n AdVeNtUrElAnD by Greg Mottola rev...</td>\n",
516
+ " <td>AdVeNtUrElAnD by Greg Mottola revised August 5...</td>\n",
517
+ " <td>0</td>\n",
518
+ " <td>0</td>\n",
519
+ " </tr>\n",
520
+ " <tr>\n",
521
+ " <th>3</th>\n",
522
+ " <td>Napoleon_2023</td>\n",
523
+ " <td>tt13287846</td>\n",
524
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;NAPOLEON&lt;/...</td>\n",
525
+ " <td>In 1793, amid the French Revolution, young arm...</td>\n",
526
+ " <td>Napoleon</td>\n",
527
+ " <td>2023</td>\n",
528
+ " <td>\\n \\n NAPOLEON \\n By \\n David S...</td>\n",
529
+ " <td>NAPOLEON\\nBy\\nDavid Scarpa\\n\\nINT. TUILERIES -...</td>\n",
530
+ " <td>0</td>\n",
531
+ " <td>0</td>\n",
532
+ " </tr>\n",
533
+ " <tr>\n",
534
+ " <th>4</th>\n",
535
+ " <td>Kubo and the Two Strings_2016</td>\n",
536
+ " <td>tt4302938</td>\n",
537
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;KUBO&lt;/char...</td>\n",
538
+ " <td>In feudal Japan, a 12-year-old boy with only o...</td>\n",
539
+ " <td>Kubo and the Two Strings</td>\n",
540
+ " <td>2016</td>\n",
541
+ " <td>\\n \\n KUBO \\n ... AND THE TWO STRIN...</td>\n",
542
+ " <td>KUBO\\n... AND THE TWO STRINGS\\nScreenplay by M...</td>\n",
543
+ " <td>1</td>\n",
544
+ " <td>0</td>\n",
545
+ " </tr>\n",
546
+ " <tr>\n",
547
+ " <th>...</th>\n",
548
+ " <td>...</td>\n",
549
+ " <td>...</td>\n",
550
+ " <td>...</td>\n",
551
+ " <td>...</td>\n",
552
+ " <td>...</td>\n",
553
+ " <td>...</td>\n",
554
+ " <td>...</td>\n",
555
+ " <td>...</td>\n",
556
+ " <td>...</td>\n",
557
+ " <td>...</td>\n",
558
+ " </tr>\n",
559
+ " <tr>\n",
560
+ " <th>2195</th>\n",
561
+ " <td>Terminator Salvation_2009</td>\n",
562
+ " <td>tt0438488</td>\n",
563
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
564
+ " <td>In 2003, Dr. Serena Kogan of Cyberdyne Systems...</td>\n",
565
+ " <td>Terminator Salvation</td>\n",
566
+ " <td>2009</td>\n",
567
+ " <td>\\n \\n INT. DEATH ROW/CELL - DAWN \\n ...</td>\n",
568
+ " <td>INT. DEATH ROW/CELL - DAWN\\nLONGVIEW STATE COR...</td>\n",
569
+ " <td>0</td>\n",
570
+ " <td>0</td>\n",
571
+ " </tr>\n",
572
+ " <tr>\n",
573
+ " <th>2196</th>\n",
574
+ " <td>Collateral Damage_2002</td>\n",
575
+ " <td>tt0233469</td>\n",
576
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;stage_direction&gt;INT....</td>\n",
577
+ " <td>A bomb detonates in the plaza of the Colombian...</td>\n",
578
+ " <td>Collateral Damage</td>\n",
579
+ " <td>2002</td>\n",
580
+ " <td>\\n \\n INT./EXT. NYFD FIREHOUSE - VARIOU...</td>\n",
581
+ " <td>INT./EXT. NYFD FIREHOUSE - VARIOUS SHOTS - NIG...</td>\n",
582
+ " <td>0</td>\n",
583
+ " <td>0</td>\n",
584
+ " </tr>\n",
585
+ " <tr>\n",
586
+ " <th>2197</th>\n",
587
+ " <td>Wonder Woman_2017</td>\n",
588
+ " <td>tt0451279</td>\n",
589
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;scene_description&gt;FA...</td>\n",
590
+ " <td>In present-day Paris, Diana Prince receives a ...</td>\n",
591
+ " <td>Wonder Woman</td>\n",
592
+ " <td>2017</td>\n",
593
+ " <td>\\n \\n FADE IN : \\n EXT. THE EARTH (...</td>\n",
594
+ " <td>FADE IN :\\nEXT. THE EARTH (FROM SPACE)\\nThe EA...</td>\n",
595
+ " <td>0</td>\n",
596
+ " <td>0</td>\n",
597
+ " </tr>\n",
598
+ " <tr>\n",
599
+ " <th>2198</th>\n",
600
+ " <td>Flatliners_2017</td>\n",
601
+ " <td>tt2039338</td>\n",
602
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;FLATLINERS...</td>\n",
603
+ " <td>Courtney is a medical student who is obsessed ...</td>\n",
604
+ " <td>Flatliners</td>\n",
605
+ " <td>2017</td>\n",
606
+ " <td>\\n \\n FLATLINERS \\n By \\n Ben R...</td>\n",
607
+ " <td>FLATLINERS\\nBy\\nBen Ripley Based on a screenpl...</td>\n",
608
+ " <td>0</td>\n",
609
+ " <td>0</td>\n",
610
+ " </tr>\n",
611
+ " <tr>\n",
612
+ " <th>2199</th>\n",
613
+ " <td>Vacancy_2007</td>\n",
614
+ " <td>tt0452702</td>\n",
615
+ " <td>&lt;script&gt;\\n &lt;scene&gt;\\n &lt;character&gt;VACANCY&lt;/c...</td>\n",
616
+ " <td>On their way home from a family party, David a...</td>\n",
617
+ " <td>Vacancy</td>\n",
618
+ " <td>2007</td>\n",
619
+ " <td>\\n \\n VACANCY \\n by \\n Mark L S...</td>\n",
620
+ " <td>VACANCY\\nby\\nMark L Smith TheHalLieberman Comp...</td>\n",
621
+ " <td>0</td>\n",
622
+ " <td>0</td>\n",
623
+ " </tr>\n",
624
+ " </tbody>\n",
625
+ "</table>\n",
626
+ "<p>2200 rows × 10 columns</p>\n",
627
+ "</div>"
628
+ ],
629
+ "text/plain": [
630
+ " movie_name imdb_id \\\n",
631
+ "0 8MM_1999 tt0134273 \n",
632
+ "1 The Iron Lady_2011 tt1007029 \n",
633
+ "2 Adventureland_2009 tt1091722 \n",
634
+ "3 Napoleon_2023 tt13287846 \n",
635
+ "4 Kubo and the Two Strings_2016 tt4302938 \n",
636
+ "... ... ... \n",
637
+ "2195 Terminator Salvation_2009 tt0438488 \n",
638
+ "2196 Collateral Damage_2002 tt0233469 \n",
639
+ "2197 Wonder Woman_2017 tt0451279 \n",
640
+ "2198 Flatliners_2017 tt2039338 \n",
641
+ "2199 Vacancy_2007 tt0452702 \n",
642
+ "\n",
643
+ " script \\\n",
644
+ "0 <script>\\n <scene>\\n <stage_direction>INT.... \n",
645
+ "1 <script>\\n <scene>\\n <stage_direction>INT.... \n",
646
+ "2 <script>\\n <scene>\\n <scene_description>Ad... \n",
647
+ "3 <script>\\n <scene>\\n <character>NAPOLEON</... \n",
648
+ "4 <script>\\n <scene>\\n <character>KUBO</char... \n",
649
+ "... ... \n",
650
+ "2195 <script>\\n <scene>\\n <stage_direction>INT.... \n",
651
+ "2196 <script>\\n <scene>\\n <stage_direction>INT.... \n",
652
+ "2197 <script>\\n <scene>\\n <scene_description>FA... \n",
653
+ "2198 <script>\\n <scene>\\n <character>FLATLINERS... \n",
654
+ "2199 <script>\\n <scene>\\n <character>VACANCY</c... \n",
655
+ "\n",
656
+ " summary \\\n",
657
+ "0 Private investigator Tom Welles is contacted b... \n",
658
+ "1 In flashbacks, the audience is shown a young M... \n",
659
+ "2 In 1987, James Brennan plans to have a summer ... \n",
660
+ "3 In 1793, amid the French Revolution, young arm... \n",
661
+ "4 In feudal Japan, a 12-year-old boy with only o... \n",
662
+ "... ... \n",
663
+ "2195 In 2003, Dr. Serena Kogan of Cyberdyne Systems... \n",
664
+ "2196 A bomb detonates in the plaza of the Colombian... \n",
665
+ "2197 In present-day Paris, Diana Prince receives a ... \n",
666
+ "2198 Courtney is a medical student who is obsessed ... \n",
667
+ "2199 On their way home from a family party, David a... \n",
668
+ "\n",
669
+ " title year \\\n",
670
+ "0 8MM 1999 \n",
671
+ "1 The Iron Lady 2011 \n",
672
+ "2 Adventureland 2009 \n",
673
+ "3 Napoleon 2023 \n",
674
+ "4 Kubo and the Two Strings 2016 \n",
675
+ "... ... ... \n",
676
+ "2195 Terminator Salvation 2009 \n",
677
+ "2196 Collateral Damage 2002 \n",
678
+ "2197 Wonder Woman 2017 \n",
679
+ "2198 Flatliners 2017 \n",
680
+ "2199 Vacancy 2007 \n",
681
+ "\n",
682
+ " script_plain \\\n",
683
+ "0 \\n \\n INT. MIAMI AIRPORT, TERMINAL -- ... \n",
684
+ "1 \\n \\n INT. SHOP. NR CHESTER SQUARE. LON... \n",
685
+ "2 \\n \\n AdVeNtUrElAnD by Greg Mottola rev... \n",
686
+ "3 \\n \\n NAPOLEON \\n By \\n David S... \n",
687
+ "4 \\n \\n KUBO \\n ... AND THE TWO STRIN... \n",
688
+ "... ... \n",
689
+ "2195 \\n \\n INT. DEATH ROW/CELL - DAWN \\n ... \n",
690
+ "2196 \\n \\n INT./EXT. NYFD FIREHOUSE - VARIOU... \n",
691
+ "2197 \\n \\n FADE IN : \\n EXT. THE EARTH (... \n",
692
+ "2198 \\n \\n FLATLINERS \\n By \\n Ben R... \n",
693
+ "2199 \\n \\n VACANCY \\n by \\n Mark L S... \n",
694
+ "\n",
695
+ " script_clean nominated winner \n",
696
+ "0 INT. MIAMI AIRPORT, TERMINAL - DAY\\nAmongst th... 0 0 \n",
697
+ "1 INT. SHOP. NR CHESTER SQUARE. LONDON. PRESENT.... 0 0 \n",
698
+ "2 AdVeNtUrElAnD by Greg Mottola revised August 5... 0 0 \n",
699
+ "3 NAPOLEON\\nBy\\nDavid Scarpa\\n\\nINT. TUILERIES -... 0 0 \n",
700
+ "4 KUBO\\n... AND THE TWO STRINGS\\nScreenplay by M... 1 0 \n",
701
+ "... ... ... ... \n",
702
+ "2195 INT. DEATH ROW/CELL - DAWN\\nLONGVIEW STATE COR... 0 0 \n",
703
+ "2196 INT./EXT. NYFD FIREHOUSE - VARIOUS SHOTS - NIG... 0 0 \n",
704
+ "2197 FADE IN :\\nEXT. THE EARTH (FROM SPACE)\\nThe EA... 0 0 \n",
705
+ "2198 FLATLINERS\\nBy\\nBen Ripley Based on a screenpl... 0 0 \n",
706
+ "2199 VACANCY\\nby\\nMark L Smith TheHalLieberman Comp... 0 0 \n",
707
+ "\n",
708
+ "[2200 rows x 10 columns]"
709
+ ]
710
+ },
711
+ "metadata": {},
712
+ "output_type": "display_data"
713
+ }
714
+ ],
715
+ "source": [
716
+ "# read oscars.csv and extract nomination, win each FilmId to merge with MovieSum\n",
717
+ "# Oscar Class \"Writing\" and \"Title\" are used as screenplay data for the model\n",
718
+ "\n",
719
+ "\"\"\"\n",
720
+ "use of oscars.csv under\n",
721
+ "BSD 2-Clause License\n",
722
+ "\n",
723
+ "Copyright (c) 2022, David V. Lu!!\n",
724
+ "All rights reserved.\n",
725
+ "\n",
726
+ "Redistribution and use in source and binary forms, with or without\n",
727
+ "modification, are permitted provided that the following conditions are met:\n",
728
+ "\n",
729
+ "1. Redistributions of source code must retain the above copyright notice, this\n",
730
+ " list of conditions and the following disclaimer.\n",
731
+ "\n",
732
+ "2. Redistributions in binary form must reproduce the above copyright notice,\n",
733
+ " this list of conditions and the following disclaimer in the documentation\n",
734
+ " and/or other materials provided with the distribution.\n",
735
+ "\n",
736
+ "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n",
737
+ "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n",
738
+ "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n",
739
+ "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n",
740
+ "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n",
741
+ "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n",
742
+ "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n",
743
+ "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n",
744
+ "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n",
745
+ "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n",
746
+ "\"\"\"\n",
747
+ "\n",
748
+ "oscars_df = pd.read_csv(\"data/oscars.csv\", sep=\"\\t\")\n",
749
+ "oscar_class = oscars_df.get(\"Class\").astype(str).str.lower()\n",
750
+ "\n",
751
+ "# we use \"writing\" and \"title\"\n",
752
+ "osc = oscars_df.loc[oscar_class.isin([\"writing\", \"title\"]), [\"FilmId\", \"Winner\"]].copy()\n",
753
+ "\n",
754
+ "# we need the imdb_id to merge with MovieSum\n",
755
+ "osc[\"imdb_id\"] = osc[\"FilmId\"].astype(str).str.findall(r\"tt\\d+\")\n",
756
+ "osc = osc.explode(\"imdb_id\").dropna(subset=[\"imdb_id\"])\n",
757
+ "\n",
758
+ "# perhaps Oscar-Winner is needed later on, but for class writing and title no screenplay in MovieSum!\n",
759
+ "osc[\"winner\"] = (osc[\"Winner\"] == \"True\").astype(int)\n",
760
+ "\n",
761
+ "#display(osc) #just have a look\n",
762
+ "\n",
763
+ "# prepare and merge\n",
764
+ "labels_df = (\n",
765
+ " osc.groupby(\"imdb_id\", as_index=False)[\"winner\"].max()\n",
766
+ " .assign(nominated=1)[[\"imdb_id\",\"nominated\",\"winner\"]])\n",
767
+ "\n",
768
+ "movie_sum_labeled = movie_sum_df.merge(labels_df, on=\"imdb_id\", how=\"left\")\n",
769
+ "movie_sum_labeled[\"nominated\"] = movie_sum_labeled[\"nominated\"].fillna(0).astype(int)\n",
770
+ "movie_sum_labeled[\"winner\"] = movie_sum_labeled[\"winner\"].fillna(0).astype(int)\n",
771
+ "\n",
772
+ "# check data OK?\n",
773
+ "print(movie_sum_labeled[[\"imdb_id\",\"movie_name\",\"nominated\",\"winner\"]].head())\n",
774
+ "print(\"Sum:\", len(movie_sum_labeled), \"| nominated=1:\", movie_sum_labeled[\"nominated\"].sum(), \"| winner=1:\", movie_sum_labeled[\"winner\"].sum())\n",
775
+ "print(\"\\n\\n\")\n",
776
+ "display(movie_sum_labeled)"
777
+ ]
778
+ },
779
+ {
780
+ "cell_type": "code",
781
+ "execution_count": null,
782
+ "id": "15f3bbe0-21dd-4067-bcf4-58966a9ed96a",
783
+ "metadata": {},
784
+ "outputs": [],
785
+ "source": [
786
+ "# path to store the dataframe locally\n",
787
+ "\n",
788
+ "outdir = Path(\"data/export_moviesum_oscar\")\n",
789
+ "outdir.mkdir(parents=True, exist_ok=True)\n",
790
+ "\n",
791
+ "parquet_path = outdir / \"movie_sum_labeled.parquet\"\n",
792
+ "movie_sum_labeled.to_parquet(parquet_path, index=False)\n",
793
+ "\n",
794
+ "csv_path = outdir / \"movie_sum_labeled.csv\"\n",
795
+ "movie_sum_labeled.to_csv(csv_path, index=False)\n"
796
+ ]
797
+ },
798
+ {
799
+ "cell_type": "code",
800
+ "execution_count": null,
801
+ "id": "b4ded244-3186-47b1-baef-be8621a0b89b",
802
+ "metadata": {},
803
+ "outputs": [],
804
+ "source": [
805
+ "# upload to hugging face\n",
806
+ "\n"
807
+ ]
808
+ },
809
+ {
810
+ "cell_type": "code",
811
+ "execution_count": 41,
812
+ "id": "f32a453f-4735-4bf8-90a8-8af36a31a525",
813
+ "metadata": {},
814
+ "outputs": [
815
+ {
816
+ "name": "stdout",
817
+ "output_type": "stream",
818
+ "text": [
819
+ "Split sizes: 1320 train | 440 val | 440 test\n",
820
+ "Positive rate (nominated=1):\n",
821
+ " train: 0.1893939393939394\n",
822
+ " val: 0.19090909090909092\n",
823
+ " test: 0.18863636363636363\n",
824
+ "Splits saved to: data\\splits\\split_60_20_20.npz\n"
825
+ ]
826
+ }
827
+ ],
828
+ "source": [
829
+ "# load data and split train val test\n",
830
+ "\n",
831
+ "parquet_path = Path(\"data/export_moviesum_oscar/movie_sum_labeled.parquet\")\n",
832
+ "df = pd.read_parquet(parquet_path)\n",
833
+ "\n",
834
+ "y = df[\"nominated\"].astype(int).to_numpy()\n",
835
+ "idx_all = np.arange(len(y))\n",
836
+ "\n",
837
+ "# 60% train, 40% rest\n",
838
+ "idx_train, idx_rest, y_train, y_rest = train_test_split(idx_all, y , test_size=0.40, stratify=y, random_state=42)\n",
839
+ "\n",
840
+ "# rest split to: 20% val, 20% test\n",
841
+ "idx_val, idx_test, y_val, y_test = train_test_split(idx_rest, y_rest, test_size=0.50, stratify=y_rest, random_state=42)\n",
842
+ "\n",
843
+ "# check\n",
844
+ "print(\"Split sizes:\",\n",
845
+ " len(idx_train), \"train \",\n",
846
+ " len(idx_val), \"val \",\n",
847
+ " len(idx_test), \"test\")\n",
848
+ "\n",
849
+ "print(\"Positive rate (nominated=1):\")\n",
850
+ "print(\" train:\", y_train.mean())\n",
851
+ "print(\" val: \", y_val.mean())\n",
852
+ "print(\" test: \", y_test.mean())\n",
853
+ "\n",
854
+ "# store .npz - numpy zip format, easy to use\n",
855
+ "outdir = Path(\"data/splits\")\n",
856
+ "outdir.mkdir(parents=True, exist_ok=True)\n",
857
+ "\n",
858
+ "np.savez(outdir/\"split_60_20_20.npz\",\n",
859
+ " idx_train=idx_train,\n",
860
+ " idx_val=idx_val,\n",
861
+ " idx_test=idx_test)\n"
862
+ ]
863
+ },
864
+ {
865
+ "cell_type": "code",
866
+ "execution_count": 81,
867
+ "id": "7bf8194f-638d-4d1b-9265-4ca075681b27",
868
+ "metadata": {},
869
+ "outputs": [
870
+ {
871
+ "name": "stdout",
872
+ "output_type": "stream",
873
+ "text": [
874
+ "Sizes train: 1320 val: 440 test: 440\n"
875
+ ]
876
+ }
877
+ ],
878
+ "source": [
879
+ "# load splits\n",
880
+ "split_path = \"data/splits/split_60_20_20.npz\"\n",
881
+ "split = np.load(split_path)\n",
882
+ "idx_train, idx_val, idx_test = split[\"idx_train\"], split[\"idx_val\"], split[\"idx_test\"]\n",
883
+ "\n",
884
+ "print(\"Sizes train:\", len(idx_train), \" val:\", len(idx_val), \" test:\", len(idx_test))\n",
885
+ "\n"
886
+ ]
887
+ },
888
+ {
889
+ "cell_type": "code",
890
+ "execution_count": 53,
891
+ "id": "33017256-a837-4c78-902d-e363ce2248bc",
892
+ "metadata": {},
893
+ "outputs": [],
894
+ "source": [
895
+ "# chunking functions, prepare for embedder\n",
896
+ "\n",
897
+ "def as_single_chunk(text):\n",
898
+ " \n",
899
+ " # return just one chunk (e.g. for titles) \n",
900
+ " return [text.strip()]\n",
901
+ "\n",
902
+ "def chunk_text(text, max_words=400, overlap=80):\n",
903
+ " \n",
904
+ " # word-based chunking with overlap, max_words: number of words per chunk, overlap to preserve context\n",
905
+ "\n",
906
+ " words = text.split()\n",
907
+ " if not words:\n",
908
+ " return [\"\"]\n",
909
+ " \n",
910
+ " chunks = []\n",
911
+ " i = 0\n",
912
+ " step = max(1, max_words - overlap)\n",
913
+ " \n",
914
+ " while i < len(words):\n",
915
+ " chunks.append(\" \".join(words[i:i+max_words]))\n",
916
+ " i += step\n",
917
+ " return chunks\n",
918
+ "\n",
919
+ "# chunking stats for report\n",
920
+ "def chunk_stats(list_of_chunks):\n",
921
+ " n_chunks = np.array([len(x) for x in list_of_chunks], dtype=np.int32)\n",
922
+ " return {\n",
923
+ " \"docs\": len(n_chunks),\n",
924
+ " \"avg_chunks\": float(n_chunks.mean()) if len(n_chunks) else 0.0,\n",
925
+ " \"max\": int(n_chunks.max()) if len(n_chunks) else 0,\n",
926
+ " }\n"
927
+ ]
928
+ },
929
+ {
930
+ "cell_type": "code",
931
+ "execution_count": 76,
932
+ "id": "fd905b90-51d4-4d0d-bdc8-e7c5702dd6b0",
933
+ "metadata": {},
934
+ "outputs": [
935
+ {
936
+ "name": "stderr",
937
+ "output_type": "stream",
938
+ "text": [
939
+ "Chunking summary: 100%|██████████████████████████████████████████████████████████| 2200/2200 [00:00<00:00, 4352.92it/s]\n",
940
+ "Chunking script_clean: 100%|██████████████████████████████████████████████████████| 2200/2200 [00:17<00:00, 127.06it/s]\n"
941
+ ]
942
+ },
943
+ {
944
+ "name": "stdout",
945
+ "output_type": "stream",
946
+ "text": [
947
+ "\n",
948
+ "chunking stats:\n",
949
+ "title : {'docs': 2200, 'avg_chunks': 1.0, 'max': 1}\n",
950
+ "summary: {'docs': 2200, 'avg_chunks': 2.505909090909091, 'max': 6}\n",
951
+ "script : {'docs': 2200, 'avg_chunks': 77.03818181818181, 'max': 186}\n"
952
+ ]
953
+ }
954
+ ],
955
+ "source": [
956
+ "# chunking title, summary, script_clean and storing in joblib\n",
957
+ "\n",
958
+ "cache_dir = Path(\"chunks_cache\")\n",
959
+ "cache_dir.mkdir(parents=True, exist_ok=True)\n",
960
+ "\n",
961
+ "# title: just 1 chunk\n",
962
+ "title_chunks_path = cache_dir / \"title_chunks.joblib\"\n",
963
+ "if title_chunks_path.exists():\n",
964
+ " title_chunks = joblib.load(title_chunks_path)\n",
965
+ "else:\n",
966
+ " title_chunks = [as_single_chunk(t) for t in tqdm(df[\"title\"].fillna(\"\").astype(str).tolist(), desc=\"chunking title\")]\n",
967
+ " joblib.dump(title_chunks, title_chunks_path)\n",
968
+ "\n",
969
+ "# summary: word-based chunking\n",
970
+ "summary_chunks_path = cache_dir / \"summary_chunks.joblib\"\n",
971
+ "if summary_chunks_path.exists():\n",
972
+ " summary_chunks = joblib.load(summary_chunks_path)\n",
973
+ "else:\n",
974
+ " summary_chunks = [chunk_text(s, max_words=400, overlap=80) for s in tqdm(df[\"summary\"].fillna(\"\").astype(str).tolist(), desc=\"chunking summary\")]\n",
975
+ " joblib.dump(summary_chunks, summary_chunks_path)\n",
976
+ "\n",
977
+ "# script_clean: word-based chunking\n",
978
+ "script_chunks_path = cache_dir / \"script_clean_chunk.joblib\"\n",
979
+ "if script_chunks_path.exists():\n",
980
+ " script_chunks = joblib.load(script_chunks_path)\n",
981
+ "else:\n",
982
+ " script_chunks = [chunk_text(s, max_words=400, overlap=80) for s in tqdm(df[\"script_clean\"].fillna(\"\").astype(str).tolist(), desc=\"chunking script_clean\")]\n",
983
+ " joblib.dump(script_chunks, script_chunks_path)\n",
984
+ "\n",
985
+ "# print stats for report\n",
986
+ "print(\"\\nchunking stats:\")\n",
987
+ "print(\"title :\", chunk_stats(title_chunks))\n",
988
+ "print(\"summary:\", chunk_stats(summary_chunks))\n",
989
+ "print(\"script :\", chunk_stats(script_chunks))\n"
990
+ ]
991
+ },
992
+ {
993
+ "cell_type": "code",
994
+ "execution_count": 75,
995
+ "id": "c828a946-20c7-4708-9d2a-8d423bcc230a",
996
+ "metadata": {},
997
+ "outputs": [
998
+ {
999
+ "name": "stdout",
1000
+ "output_type": "stream",
1001
+ "text": [
1002
+ "Movie: 8MM_1999\n",
1003
+ "\n",
1004
+ "Title chunks: ['8MM']\n",
1005
+ "\n",
1006
+ "Summary - number of chunks: 3\n",
1007
+ "Summary - first chunk:\n",
1008
+ "Private investigator Tom Welles is contacted by Daniel Longdale, attorney for wealthy widow Mrs. Christian, whose husband has recently died. While clearing out her late husband's safe, she and Longdal ...\n",
1009
+ "\n",
1010
+ "Script - number of chunks: 115\n",
1011
+ "Script - first chunk:\n",
1012
+ "INT. MIAMI AIRPORT, TERMINAL - DAY Amongst the weary tourist families and solitary businessmen sits TOM WELLES , middle - aged , hair neat , suit crisp and gray . He 's eating crackers from a cellopha ...\n"
1013
+ ]
1014
+ }
1015
+ ],
1016
+ "source": [
1017
+ "# check chunks e.g. movie 8MM\n",
1018
+ "i = 0 \n",
1019
+ "\n",
1020
+ "print(\"Movie:\", df.iloc[i][\"movie_name\"])\n",
1021
+ "print(\"\\nTitle chunks:\", title_chunks[i])\n",
1022
+ "\n",
1023
+ "print(\"\\nSummary - number of chunks:\", len(summary_chunks[i]))\n",
1024
+ "print(\"Summary - first chunk:\")\n",
1025
+ "print(summary_chunks[i][0][:200], \"...\")\n",
1026
+ "\n",
1027
+ "print(\"\\nScript - number of chunks:\", len(script_chunks[i]))\n",
1028
+ "print(\"Script - first chunk:\")\n",
1029
+ "print(script_chunks[i][0][:200], \"...\")\n"
1030
+ ]
1031
+ },
1032
+ {
1033
+ "cell_type": "code",
1034
+ "execution_count": 58,
1035
+ "id": "a730e151-3a99-4f18-9938-62cce849f19e",
1036
+ "metadata": {},
1037
+ "outputs": [],
1038
+ "source": [
1039
+ "# load model intfloat/e5-base-v2, GPU/FP16 if CUDA available (nvidia geforce RTX 3060, 12 GB VRAM)\n",
1040
+ "\n",
1041
+ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
1042
+ "embedder = SentenceTransformer(\"intfloat/e5-base-v2\", device=device)\n",
1043
+ "\n",
1044
+ "if device == \"cuda\":\n",
1045
+ " embedder = embedder.to(torch.float16) # FP16 float16 for faster run and less VRAM (instead of FP32)\n",
1046
+ "\n",
1047
+ "# encoding function for sentence embeddings\n",
1048
+ "def encode_chunks(chunks, batch_size=96):\n",
1049
+ " \n",
1050
+ " texts = [f\"query: {c}\" for c in chunks] # prefix: \"query:\" for classification as learned from https://huggingface.co/intfloat/e5-base-v2\n",
1051
+ " embs = embedder.encode(\n",
1052
+ " texts,\n",
1053
+ " batch_size=batch_size, # batch_size = 96 should work\n",
1054
+ " normalize_embeddings=True,\n",
1055
+ " show_progress_bar=False\n",
1056
+ " )\n",
1057
+ " return np.asarray(embs, dtype=np.float32) # float32 is standard, probably even 64bit internal is slower\n",
1058
+ "\n",
1059
+ "# mean max pooling function and normalize L2\n",
1060
+ "def pool_mean_max(embs: np.ndarray):\n",
1061
+ " \n",
1062
+ " mu = embs.mean(axis=0)\n",
1063
+ " mx = embs.max(axis=0)\n",
1064
+ " vec = np.concatenate([mu, mx], axis=0)\n",
1065
+ " n = np.linalg.norm(vec) + 1e-12\n",
1066
+ " return (vec / n).astype(np.float32)\n",
1067
+ " \n",
1068
+ " "
1069
+ ]
1070
+ },
1071
+ {
1072
+ "cell_type": "code",
1073
+ "execution_count": 82,
1074
+ "id": "c8d68160-5679-4d2a-84fe-5c2a52018164",
1075
+ "metadata": {},
1076
+ "outputs": [
1077
+ {
1078
+ "name": "stdout",
1079
+ "output_type": "stream",
1080
+ "text": [
1081
+ "Loaded from cache: emb_cache\\emb_title.joblib Shape: (2200, 1536)\n",
1082
+ "Loaded from cache: emb_cache\\emb_summary.joblib Shape: (2200, 1536)\n",
1083
+ "Loaded from cache: emb_cache\\emb_script.joblib Shape: (2200, 1536)\n",
1084
+ "\n",
1085
+ "Embeddings done:\n",
1086
+ " X_title : (2200, 1536) float32\n",
1087
+ " X_summary: (2200, 1536) float32\n",
1088
+ " X_script : (2200, 1536) float32\n"
1089
+ ]
1090
+ }
1091
+ ],
1092
+ "source": [
1093
+ "\n",
1094
+ "emb_dir = Path(\"emb_cache\")\n",
1095
+ "emb_dir.mkdir(parents=True, exist_ok=True)\n",
1096
+ "\n",
1097
+ "def compute_script_embeddings(chunks_per_doc, cache_file, batch_size=96, desc=\"\"):\n",
1098
+ " \n",
1099
+ " cache_path = emb_dir / cache_file # make path of emb_dir and \"cache_file\"\n",
1100
+ " if cache_path.exists():\n",
1101
+ " X = joblib.load(cache_path)\n",
1102
+ " print(f\"Loaded from cache: {cache_path} Shape: {X.shape}\")\n",
1103
+ " return X\n",
1104
+ "\n",
1105
+ " vecs = []\n",
1106
+ " for chs in tqdm(chunks_per_doc, desc=desc):\n",
1107
+ " embs = encode_chunks(chs, batch_size=batch_size) # E5, FP16 encoding\n",
1108
+ " vec = pool_mean_max(embs) # mean, max, L2-normalize\n",
1109
+ " vecs.append(vec)\n",
1110
+ " X = np.vstack(vecs).astype(np.float32)\n",
1111
+ " joblib.dump(X, cache_path) # store embeddings in joblib\n",
1112
+ " print(f\"Stored: {cache_path} Shape: {X.shape}\")\n",
1113
+ " return X\n",
1114
+ " \n",
1115
+ "\n",
1116
+ "# title embeddings\n",
1117
+ "X_title = compute_script_embeddings(\n",
1118
+ " title_chunks, # from joblib\n",
1119
+ " cache_file=\"emb_title.joblib\",\n",
1120
+ " batch_size=256, # 256 batch size is possible\n",
1121
+ " desc=\"encode title\"\n",
1122
+ ")\n",
1123
+ "\n",
1124
+ "# summary embeddings\n",
1125
+ "X_summary = compute_script_embeddings(\n",
1126
+ " summary_chunks, # from joblib\n",
1127
+ " cache_file=\"emb_summary.joblib\",\n",
1128
+ " batch_size=96, # default batch size\n",
1129
+ " desc=\"encode summary\"\n",
1130
+ ")\n",
1131
+ "\n",
1132
+ "# script_clean embeddings\n",
1133
+ "X_script = compute_script_embeddings(\n",
1134
+ " script_chunks, # from joblib\n",
1135
+ " cache_file=\"emb_script.joblib\",\n",
1136
+ " batch_size=96, # default batch size\n",
1137
+ " desc=\"encode script\"\n",
1138
+ ")\n",
1139
+ "\n",
1140
+ "# check embeddings \n",
1141
+ "print(\"\\nEmbeddings done:\")\n",
1142
+ "print(\" X_title :\", X_title.shape, X_title.dtype)\n",
1143
+ "print(\" X_summary:\", X_summary.shape, X_summary.dtype)\n",
1144
+ "print(\" X_script :\", X_script.shape, X_script.dtype)\n"
1145
+ ]
1146
+ },
1147
+ {
1148
+ "cell_type": "code",
1149
+ "execution_count": 60,
1150
+ "id": "9f6eb2b1-58b4-4edc-a452-0965b6789737",
1151
+ "metadata": {},
1152
+ "outputs": [
1153
+ {
1154
+ "name": "stdout",
1155
+ "output_type": "stream",
1156
+ "text": [
1157
+ "Shapes:\n",
1158
+ " title : (2200, 1536)\n",
1159
+ " summary: (2200, 1536)\n",
1160
+ " script : (2200, 1536)\n",
1161
+ "Split sizes: 1320 440 440\n"
1162
+ ]
1163
+ }
1164
+ ],
1165
+ "source": [
1166
+ "# load embeddings and splits\n",
1167
+ "\n",
1168
+ "emb_dir = Path(\"emb_cache\")\n",
1169
+ "\n",
1170
+ "X_title = joblib.load(emb_dir / \"emb_title.joblib\") # all titles!!! -> train, val, test indexed!!!\n",
1171
+ "X_summary = joblib.load(emb_dir / \"emb_summary.joblib\") # all summaries!!! -> train, val, test indexed!!!\n",
1172
+ "X_script = joblib.load(emb_dir / \"emb_script.joblib\") # all scripts!!! -> train, val, test indexed!!!\n",
1173
+ "\n",
1174
+ "print(\"Shapes:\")\n",
1175
+ "print(\" title :\", X_title.shape)\n",
1176
+ "print(\" summary:\", X_summary.shape)\n",
1177
+ "print(\" script :\", X_script.shape)\n",
1178
+ "\n",
1179
+ "\n",
1180
+ "y_all = df[\"nominated\"].astype(int).to_numpy() # all scripts nominated: 0 or 1 -> indexed!!!\n",
1181
+ "\n",
1182
+ "split = np.load(\"data/splits/split_60_20_20.npz\")\n",
1183
+ "idx_train, idx_val, idx_test = split[\"idx_train\"], split[\"idx_val\"], split[\"idx_test\"]\n",
1184
+ "\n",
1185
+ "# check splits 60/20/20\n",
1186
+ "print(\"Split sizes:\", len(idx_train), len(idx_val), len(idx_test))"
1187
+ ]
1188
+ },
1189
+ {
1190
+ "cell_type": "code",
1191
+ "execution_count": 83,
1192
+ "id": "e4cbc61e-d514-4381-94e5-23dcfe9b8a3d",
1193
+ "metadata": {},
1194
+ "outputs": [],
1195
+ "source": [
1196
+ "# train, val, test logistic regression function on variants: single features or combined features\n",
1197
+ "\n",
1198
+ "\n",
1199
+ "def train_eval_variant(X, y_all, idx_train, idx_val, idx_test, name=\"VARIANT\"):\n",
1200
+ " # use index for splits\n",
1201
+ " X_train, X_val, X_test = X[idx_train], X[idx_val], X[idx_test]\n",
1202
+ " y_train, y_val, y_test = y_all[idx_train], y_all[idx_val], y_all[idx_test]\n",
1203
+ "\n",
1204
+ " # logistic regression\n",
1205
+ " clf = LogisticRegression(\n",
1206
+ " max_iter=5000, # hyperparameter described in report paper\n",
1207
+ " class_weight=\"balanced\",\n",
1208
+ " C=1.0,\n",
1209
+ " n_jobs=4 # use 4 CPU cores if possible\n",
1210
+ " )\n",
1211
+ "\n",
1212
+ " # training\n",
1213
+ " clf.fit(X_train, y_train)\n",
1214
+ "\n",
1215
+ " # threshold tuning\n",
1216
+ " val_probs = clf.predict_proba(X_val)[:, 1]\n",
1217
+ " ths = np.linspace(0.05, 0.95, 181) # step size 0.005!\n",
1218
+ " f1s = [f1_score(y_val, (val_probs >= t).astype(int)) for t in ths]\n",
1219
+ " best_t = ths[int(np.argmax(f1s))]\n",
1220
+ "\n",
1221
+ " # predicting\n",
1222
+ " test_probs = clf.predict_proba(X_test)[:, 1]\n",
1223
+ " test_pred = (test_probs >= best_t).astype(int)\n",
1224
+ "\n",
1225
+ " acc = accuracy_score(y_test, test_pred)\n",
1226
+ " roc = roc_auc_score(y_test, test_probs)\n",
1227
+ " pr = average_precision_score(y_test, test_probs) # PR-AUC\n",
1228
+ " f1_p = f1_score(y_test, test_pred, pos_label=1)\n",
1229
+ " f1_n = f1_score(y_test, test_pred, pos_label=0)\n",
1230
+ " f1_m = 0.5 * (f1_p + f1_n)\n",
1231
+ "\n",
1232
+ "\n",
1233
+ " # data for reports table\n",
1234
+ " print(f\"\\n=== {name} ===\")\n",
1235
+ " print(f\"Best val threshold: {best_t:.2f}\")\n",
1236
+ " print(\"Accuracy :\", acc)\n",
1237
+ " print(\"ROC-AUC :\", roc)\n",
1238
+ " print(\"PR-AUC :\", pr)\n",
1239
+ " print(\"F1_pos :\", f1_p)\n",
1240
+ " print(\"F1_neg :\", f1_n)\n",
1241
+ " print(\"Macro-F1 :\", f1_m)\n",
1242
+ "\n",
1243
+ " # return results for further use\n",
1244
+ " return {\n",
1245
+ " \"variant\": name,\n",
1246
+ " \"best_t_val\": float(best_t),\n",
1247
+ " \"ACC_test\": float(acc),\n",
1248
+ " \"ROC_AUC_test\": float(roc),\n",
1249
+ " \"PR_AUC_test\": float(pr),\n",
1250
+ " \"F1_pos_test\": float(f1_p),\n",
1251
+ " \"F1_neg_test\": float(f1_n),\n",
1252
+ " \"Macro_F1_test\": float(f1_m),\n",
1253
+ " \"model\": clf,\n",
1254
+ " }\n",
1255
+ "\n"
1256
+ ]
1257
+ },
1258
+ {
1259
+ "cell_type": "code",
1260
+ "execution_count": 86,
1261
+ "id": "72fabc00-40d8-4ed5-a88e-319862d296f7",
1262
+ "metadata": {},
1263
+ "outputs": [
1264
+ {
1265
+ "name": "stdout",
1266
+ "output_type": "stream",
1267
+ "text": [
1268
+ "\n",
1269
+ "=== TITLE ===\n",
1270
+ "Best val threshold: 0.49\n",
1271
+ "Accuracy : 0.6704545454545454\n",
1272
+ "ROC-AUC : 0.6919104991394148\n",
1273
+ "PR-AUC : 0.34683336198805464\n",
1274
+ "F1_pos : 0.3881856540084388\n",
1275
+ "F1_neg : 0.7744945567651633\n",
1276
+ "Macro-F1 : 0.581340105386801\n",
1277
+ "\n",
1278
+ "=== SCRIPT ===\n",
1279
+ "Best val threshold: 0.52\n",
1280
+ "Accuracy : 0.7318181818181818\n",
1281
+ "ROC-AUC : 0.7419256859370255\n",
1282
+ "PR-AUC : 0.4154771297314072\n",
1283
+ "F1_pos : 0.42718446601941745\n",
1284
+ "F1_neg : 0.8249258160237388\n",
1285
+ "Macro-F1 : 0.6260551410215781\n",
1286
+ "\n",
1287
+ "=== SUMMARY ===\n",
1288
+ "Best val threshold: 0.50\n",
1289
+ "Accuracy : 0.6931818181818182\n",
1290
+ "ROC-AUC : 0.7537038911950322\n",
1291
+ "PR-AUC : 0.4243557395687297\n",
1292
+ "F1_pos : 0.4444444444444444\n",
1293
+ "F1_neg : 0.7880690737833596\n",
1294
+ "Macro-F1 : 0.616256759113902\n",
1295
+ "\n",
1296
+ "=== SCRIPT+SUMMARY ===\n",
1297
+ "Best val threshold: 0.51\n",
1298
+ "Accuracy : 0.7340909090909091\n",
1299
+ "ROC-AUC : 0.7675744996793898\n",
1300
+ "PR-AUC : 0.450240232910733\n",
1301
+ "F1_pos : 0.4845814977973568\n",
1302
+ "F1_neg : 0.8208269525267994\n",
1303
+ "Macro-F1 : 0.652704225162078\n",
1304
+ "\n",
1305
+ "=== SCRIPT+SUMMARY+TITLE ===\n",
1306
+ "Best val threshold: 0.52\n",
1307
+ "Accuracy : 0.759090909090909\n",
1308
+ "ROC-AUC : 0.7900172117039588\n",
1309
+ "PR-AUC : 0.45534767650738134\n",
1310
+ "F1_pos : 0.4854368932038835\n",
1311
+ "F1_neg : 0.8427299703264095\n",
1312
+ "Macro-F1 : 0.6640834317651465\n"
1313
+ ]
1314
+ }
1315
+ ],
1316
+ "source": [
1317
+ "# result computing for feature variants\n",
1318
+ "\n",
1319
+ "res_title = train_eval_variant(X_title, y_all, idx_train, idx_val, idx_test, name=\"TITLE\")\n",
1320
+ "\n",
1321
+ "res_script = train_eval_variant(X_script, y_all, idx_train, idx_val, idx_test, name=\"SCRIPT\")\n",
1322
+ "\n",
1323
+ "res_summary = train_eval_variant(X_summary, y_all, idx_train, idx_val, idx_test, name=\"SUMMARY\")\n",
1324
+ "\n",
1325
+ "res_script_summary = train_eval_variant(np.hstack([X_script, X_summary]), y_all, idx_train, idx_val, idx_test, name=\"SCRIPT+SUMMARY\") # horizontal numpy stack for combination\n",
1326
+ "\n",
1327
+ "res_script_summary_title = train_eval_variant(np.hstack([X_script, X_summary, X_title]), y_all, idx_train, idx_val, idx_test, name=\"SCRIPT+SUMMARY+TITLE\") # horizontal numpy stack for combination\n"
1328
+ ]
1329
+ },
1330
+ {
1331
+ "cell_type": "code",
1332
+ "execution_count": 87,
1333
+ "id": "3e4acd0a-4754-43cb-a674-7e71a41c9a22",
1334
+ "metadata": {},
1335
+ "outputs": [
1336
+ {
1337
+ "name": "stdout",
1338
+ "output_type": "stream",
1339
+ "text": [
1340
+ " variant best_t_val ACC_test ROC_AUC_test PR_AUC_test \\\n",
1341
+ "4 SCRIPT+SUMMARY+TITLE 0.520 0.759091 0.790017 0.455348 \n",
1342
+ "3 SCRIPT+SUMMARY 0.510 0.734091 0.767574 0.450240 \n",
1343
+ "2 SUMMARY 0.500 0.693182 0.753704 0.424356 \n",
1344
+ "1 SCRIPT 0.520 0.731818 0.741926 0.415477 \n",
1345
+ "0 TITLE 0.495 0.670455 0.691910 0.346833 \n",
1346
+ "\n",
1347
+ " F1_pos_test F1_neg_test Macro_F1_test \n",
1348
+ "4 0.485437 0.842730 0.664083 \n",
1349
+ "3 0.484581 0.820827 0.652704 \n",
1350
+ "2 0.444444 0.788069 0.616257 \n",
1351
+ "1 0.427184 0.824926 0.626055 \n",
1352
+ "0 0.388186 0.774495 0.581340 \n"
1353
+ ]
1354
+ }
1355
+ ],
1356
+ "source": [
1357
+ "\n",
1358
+ "\n",
1359
+ "results_df = pd.DataFrame([\n",
1360
+ " res_title, res_script, res_summary,\n",
1361
+ " res_script_summary, res_script_summary_title\n",
1362
+ "])\n",
1363
+ "\n",
1364
+ "# overview\n",
1365
+ "print(results_df.drop(columns=[\"model\"]).sort_values(\"PR_AUC_test\", ascending=False))\n"
1366
+ ]
1367
+ },
1368
+ {
1369
+ "cell_type": "code",
1370
+ "execution_count": null,
1371
+ "id": "7bd1794c-503b-48c7-b553-e04e8202bb4f",
1372
+ "metadata": {},
1373
+ "outputs": [],
1374
+ "source": []
1375
+ }
1376
+ ],
1377
+ "metadata": {
1378
+ "kernelspec": {
1379
+ "display_name": "Python 3 (ipykernel)",
1380
+ "language": "python",
1381
+ "name": "python3"
1382
+ },
1383
+ "language_info": {
1384
+ "codemirror_mode": {
1385
+ "name": "ipython",
1386
+ "version": 3
1387
+ },
1388
+ "file_extension": ".py",
1389
+ "mimetype": "text/x-python",
1390
+ "name": "python",
1391
+ "nbconvert_exporter": "python",
1392
+ "pygments_lexer": "ipython3",
1393
+ "version": "3.12.7"
1394
+ }
1395
+ },
1396
+ "nbformat": 4,
1397
+ "nbformat_minor": 5
1398
+ }