Aghiless commited on
Commit
ef6f28d
·
verified ·
1 Parent(s): 9ef06fa

Upload 3 files

Browse files
1_Data_Creation (2).ipynb ADDED
@@ -0,0 +1,931 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "4ba6aba8"
7
+ },
8
+ "source": [
9
+ "# 🤖 **Data Collection, Creation, Storage, and Processing**\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "jpASMyIQMaAq"
16
+ },
17
+ "source": [
18
+ "## **1.** 📦 Install required packages"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": 1,
24
+ "metadata": {
25
+ "colab": {
26
+ "base_uri": "https://localhost:8080/"
27
+ },
28
+ "id": "f48c8f8c",
29
+ "outputId": "13d0dd5e-82c6-489f-b1f0-e970186a4eb7"
30
+ },
31
+ "outputs": [
32
+ {
33
+ "output_type": "stream",
34
+ "name": "stdout",
35
+ "text": [
36
+ "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (4.13.5)\n",
37
+ "Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (2.2.2)\n",
38
+ "Requirement already satisfied: matplotlib in /usr/local/lib/python3.12/dist-packages (3.10.0)\n",
39
+ "Requirement already satisfied: seaborn in /usr/local/lib/python3.12/dist-packages (0.13.2)\n",
40
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (2.0.2)\n",
41
+ "Requirement already satisfied: textblob in /usr/local/lib/python3.12/dist-packages (0.19.0)\n",
42
+ "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (2.8.3)\n",
43
+ "Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (4.15.0)\n",
44
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas) (2.9.0.post0)\n",
45
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.2)\n",
46
+ "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.3)\n",
47
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.3.3)\n",
48
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (0.12.1)\n",
49
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (4.61.1)\n",
50
+ "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.4.9)\n",
51
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (26.0)\n",
52
+ "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (11.3.0)\n",
53
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (3.3.2)\n",
54
+ "Requirement already satisfied: nltk>=3.9 in /usr/local/lib/python3.12/dist-packages (from textblob) (3.9.1)\n",
55
+ "Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (8.3.1)\n",
56
+ "Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (1.5.3)\n",
57
+ "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (2025.11.3)\n",
58
+ "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (4.67.3)\n",
59
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)\n"
60
+ ]
61
+ }
62
+ ],
63
+ "source": [
64
+ "!pip install beautifulsoup4 pandas matplotlib seaborn numpy textblob"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "metadata": {
70
+ "id": "lquNYCbfL9IM"
71
+ },
72
+ "source": [
73
+ "## **2.** ⛏ Web-scrape all book titles, prices, and ratings from books.toscrape.com"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "markdown",
78
+ "metadata": {
79
+ "id": "0IWuNpxxYDJF"
80
+ },
81
+ "source": [
82
+ "### *a. Initial setup*\n",
83
+ "Define the base url of the website you will scrape as well as how and what you will scrape"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 2,
89
+ "metadata": {
90
+ "id": "91d52125"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import requests\n",
95
+ "from bs4 import BeautifulSoup\n",
96
+ "import pandas as pd\n",
97
+ "import time\n",
98
+ "\n",
99
+ "base_url = \"https://books.toscrape.com/catalogue/page-{}.html\"\n",
100
+ "headers = {\"User-Agent\": \"Mozilla/5.0\"}\n",
101
+ "\n",
102
+ "titles, prices, ratings = [], [], []"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "metadata": {
108
+ "id": "oCdTsin2Yfp3"
109
+ },
110
+ "source": [
111
+ "### *b. Fill titles, prices, and ratings from the web pages*"
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "execution_count": 3,
117
+ "metadata": {
118
+ "id": "xqO5Y3dnYhxt"
119
+ },
120
+ "outputs": [],
121
+ "source": [
122
+ "# Loop through all 50 pages\n",
123
+ "for page in range(1, 51):\n",
124
+ " url = base_url.format(page)\n",
125
+ " response = requests.get(url, headers=headers)\n",
126
+ " soup = BeautifulSoup(response.content, \"html.parser\")\n",
127
+ " books = soup.find_all(\"article\", class_=\"product_pod\")\n",
128
+ "\n",
129
+ " for book in books:\n",
130
+ " titles.append(book.h3.a[\"title\"])\n",
131
+ " prices.append(float(book.find(\"p\", class_=\"price_color\").text[1:]))\n",
132
+ " ratings.append(book.p.get(\"class\")[1])\n",
133
+ "\n",
134
+ " time.sleep(0.5) # polite scraping delay"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "markdown",
139
+ "metadata": {
140
+ "id": "T0TOeRC4Yrnn"
141
+ },
142
+ "source": [
143
+ "### *c. ✋🏻🛑⛔️ Create a dataframe df_books that contains the now complete \"title\", \"price\", and \"rating\" objects*"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "code",
148
+ "execution_count": 4,
149
+ "metadata": {
150
+ "id": "l5FkkNhUYTHh"
151
+ },
152
+ "outputs": [],
153
+ "source": []
154
+ },
155
+ {
156
+ "cell_type": "markdown",
157
+ "metadata": {
158
+ "id": "duI5dv3CZYvF"
159
+ },
160
+ "source": [
161
+ "### *d. Save web-scraped dataframe either as a CSV or Excel file*"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 5,
167
+ "metadata": {
168
+ "id": "lC1U_YHtZifh"
169
+ },
170
+ "outputs": [],
171
+ "source": [
172
+ "# 💾 Save to CSV\n",
173
+ "df_books.to_csv(\"books_data.csv\", index=False)\n",
174
+ "\n",
175
+ "# 💾 Or save to Excel\n",
176
+ "# df_books.to_excel(\"books_data.xlsx\", index=False)"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "markdown",
181
+ "metadata": {
182
+ "id": "qMjRKMBQZlJi"
183
+ },
184
+ "source": [
185
+ "### *e. ✋🏻🛑⛔️ View first few lines*"
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "code",
190
+ "execution_count": 6,
191
+ "metadata": {
192
+ "colab": {
193
+ "base_uri": "https://localhost:8080/",
194
+ "height": 206
195
+ },
196
+ "id": "O_wIvTxYZqCK",
197
+ "outputId": "349b36b0-c008-4fd5-d4a4-dba38ae18337"
198
+ },
199
+ "outputs": [
200
+ {
201
+ "output_type": "execute_result",
202
+ "data": {
203
+ "text/plain": [
204
+ " title price rating\n",
205
+ "0 A Light in the Attic 51.77 Three\n",
206
+ "1 Tipping the Velvet 53.74 One\n",
207
+ "2 Soumission 50.10 One\n",
208
+ "3 Sharp Objects 47.82 Four\n",
209
+ "4 Sapiens: A Brief History of Humankind 54.23 Five"
210
+ ],
211
+ "text/html": [
212
+ "\n",
213
+ " <div id=\"df-04c87660-4415-45e9-ad3b-3fa19d9402c2\" class=\"colab-df-container\">\n",
214
+ " <div>\n",
215
+ "<style scoped>\n",
216
+ " .dataframe tbody tr th:only-of-type {\n",
217
+ " vertical-align: middle;\n",
218
+ " }\n",
219
+ "\n",
220
+ " .dataframe tbody tr th {\n",
221
+ " vertical-align: top;\n",
222
+ " }\n",
223
+ "\n",
224
+ " .dataframe thead th {\n",
225
+ " text-align: right;\n",
226
+ " }\n",
227
+ "</style>\n",
228
+ "<table border=\"1\" class=\"dataframe\">\n",
229
+ " <thead>\n",
230
+ " <tr style=\"text-align: right;\">\n",
231
+ " <th></th>\n",
232
+ " <th>title</th>\n",
233
+ " <th>price</th>\n",
234
+ " <th>rating</th>\n",
235
+ " </tr>\n",
236
+ " </thead>\n",
237
+ " <tbody>\n",
238
+ " <tr>\n",
239
+ " <th>0</th>\n",
240
+ " <td>A Light in the Attic</td>\n",
241
+ " <td>51.77</td>\n",
242
+ " <td>Three</td>\n",
243
+ " </tr>\n",
244
+ " <tr>\n",
245
+ " <th>1</th>\n",
246
+ " <td>Tipping the Velvet</td>\n",
247
+ " <td>53.74</td>\n",
248
+ " <td>One</td>\n",
249
+ " </tr>\n",
250
+ " <tr>\n",
251
+ " <th>2</th>\n",
252
+ " <td>Soumission</td>\n",
253
+ " <td>50.10</td>\n",
254
+ " <td>One</td>\n",
255
+ " </tr>\n",
256
+ " <tr>\n",
257
+ " <th>3</th>\n",
258
+ " <td>Sharp Objects</td>\n",
259
+ " <td>47.82</td>\n",
260
+ " <td>Four</td>\n",
261
+ " </tr>\n",
262
+ " <tr>\n",
263
+ " <th>4</th>\n",
264
+ " <td>Sapiens: A Brief History of Humankind</td>\n",
265
+ " <td>54.23</td>\n",
266
+ " <td>Five</td>\n",
267
+ " </tr>\n",
268
+ " </tbody>\n",
269
+ "</table>\n",
270
+ "</div>\n",
271
+ " <div class=\"colab-df-buttons\">\n",
272
+ "\n",
273
+ " <div class=\"colab-df-container\">\n",
274
+ " <button class=\"colab-df-convert\" onclick=\"convertToInteractive('df-04c87660-4415-45e9-ad3b-3fa19d9402c2')\"\n",
275
+ " title=\"Convert this dataframe to an interactive table.\"\n",
276
+ " style=\"display:none;\">\n",
277
+ "\n",
278
+ " <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\" viewBox=\"0 -960 960 960\">\n",
279
+ " <path d=\"M120-120v-720h720v720H120Zm60-500h600v-160H180v160Zm220 220h160v-160H400v160Zm0 220h160v-160H400v160ZM180-400h160v-160H180v160Zm440 0h160v-160H620v160ZM180-180h160v-160H180v160Zm440 0h160v-160H620v160Z\"/>\n",
280
+ " </svg>\n",
281
+ " </button>\n",
282
+ "\n",
283
+ " <style>\n",
284
+ " .colab-df-container {\n",
285
+ " display:flex;\n",
286
+ " gap: 12px;\n",
287
+ " }\n",
288
+ "\n",
289
+ " .colab-df-convert {\n",
290
+ " background-color: #E8F0FE;\n",
291
+ " border: none;\n",
292
+ " border-radius: 50%;\n",
293
+ " cursor: pointer;\n",
294
+ " display: none;\n",
295
+ " fill: #1967D2;\n",
296
+ " height: 32px;\n",
297
+ " padding: 0 0 0 0;\n",
298
+ " width: 32px;\n",
299
+ " }\n",
300
+ "\n",
301
+ " .colab-df-convert:hover {\n",
302
+ " background-color: #E2EBFA;\n",
303
+ " box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
304
+ " fill: #174EA6;\n",
305
+ " }\n",
306
+ "\n",
307
+ " .colab-df-buttons div {\n",
308
+ " margin-bottom: 4px;\n",
309
+ " }\n",
310
+ "\n",
311
+ " [theme=dark] .colab-df-convert {\n",
312
+ " background-color: #3B4455;\n",
313
+ " fill: #D2E3FC;\n",
314
+ " }\n",
315
+ "\n",
316
+ " [theme=dark] .colab-df-convert:hover {\n",
317
+ " background-color: #434B5C;\n",
318
+ " box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
319
+ " filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
320
+ " fill: #FFFFFF;\n",
321
+ " }\n",
322
+ " </style>\n",
323
+ "\n",
324
+ " <script>\n",
325
+ " const buttonEl =\n",
326
+ " document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2 button.colab-df-convert');\n",
327
+ " buttonEl.style.display =\n",
328
+ " google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
329
+ "\n",
330
+ " async function convertToInteractive(key) {\n",
331
+ " const element = document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2');\n",
332
+ " const dataTable =\n",
333
+ " await google.colab.kernel.invokeFunction('convertToInteractive',\n",
334
+ " [key], {});\n",
335
+ " if (!dataTable) return;\n",
336
+ "\n",
337
+ " const docLinkHtml = 'Like what you see? Visit the ' +\n",
338
+ " '<a target=\"_blank\" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'\n",
339
+ " + ' to learn more about interactive tables.';\n",
340
+ " element.innerHTML = '';\n",
341
+ " dataTable['output_type'] = 'display_data';\n",
342
+ " await google.colab.output.renderOutput(dataTable, element);\n",
343
+ " const docLink = document.createElement('div');\n",
344
+ " docLink.innerHTML = docLinkHtml;\n",
345
+ " element.appendChild(docLink);\n",
346
+ " }\n",
347
+ " </script>\n",
348
+ " </div>\n",
349
+ "\n",
350
+ "\n",
351
+ " </div>\n",
352
+ " </div>\n"
353
+ ],
354
+ "application/vnd.google.colaboratory.intrinsic+json": {
355
+ "type": "dataframe",
356
+ "variable_name": "df_books",
357
+ "summary": "{\n \"name\": \"df_books\",\n \"rows\": 1000,\n \"fields\": [\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 999,\n \"samples\": [\n \"The Grownup\",\n \"Persepolis: The Story of a Childhood (Persepolis #1-2)\",\n \"Ayumi's Violin\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 14.446689669952772,\n \"min\": 10.0,\n \"max\": 59.99,\n \"num_unique_values\": 903,\n \"samples\": [\n 19.73,\n 55.65,\n 46.31\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"rating\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"One\",\n \"Two\",\n \"Four\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
358
+ }
359
+ },
360
+ "metadata": {},
361
+ "execution_count": 6
362
+ }
363
+ ],
364
+ "source": []
365
+ },
366
+ {
367
+ "cell_type": "markdown",
368
+ "metadata": {
369
+ "id": "p-1Pr2szaqLk"
370
+ },
371
+ "source": [
372
+ "## **3.** 🧩 Create a meaningful connection between real & synthetic datasets"
373
+ ]
374
+ },
375
+ {
376
+ "cell_type": "markdown",
377
+ "metadata": {
378
+ "id": "SIaJUGIpaH4V"
379
+ },
380
+ "source": [
381
+ "### *a. Initial setup*"
382
+ ]
383
+ },
384
+ {
385
+ "cell_type": "code",
386
+ "execution_count": 7,
387
+ "metadata": {
388
+ "id": "-gPXGcRPuV_9"
389
+ },
390
+ "outputs": [],
391
+ "source": [
392
+ "import numpy as np\n",
393
+ "import random\n",
394
+ "from datetime import datetime\n",
395
+ "import warnings\n",
396
+ "\n",
397
+ "warnings.filterwarnings(\"ignore\")\n",
398
+ "random.seed(2025)\n",
399
+ "np.random.seed(2025)"
400
+ ]
401
+ },
402
+ {
403
+ "cell_type": "markdown",
404
+ "metadata": {
405
+ "id": "pY4yCoIuaQqp"
406
+ },
407
+ "source": [
408
+ "### *b. Generate popularity scores based on rating (with some randomness) with a generate_popularity_score function*"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": 8,
414
+ "metadata": {
415
+ "id": "mnd5hdAbaNjz"
416
+ },
417
+ "outputs": [],
418
+ "source": [
419
+ "def generate_popularity_score(rating):\n",
420
+ " base = {\"One\": 2, \"Two\": 3, \"Three\": 3, \"Four\": 4, \"Five\": 4}.get(rating, 3)\n",
421
+ " trend_factor = random.choices([-1, 0, 1], weights=[1, 3, 2])[0]\n",
422
+ " return int(np.clip(base + trend_factor, 1, 5))"
423
+ ]
424
+ },
425
+ {
426
+ "cell_type": "markdown",
427
+ "metadata": {
428
+ "id": "n4-TaNTFgPak"
429
+ },
430
+ "source": [
431
+ "### *c. ✋🏻🛑⛔️ Run the function to create a \"popularity_score\" column from \"rating\"*"
432
+ ]
433
+ },
434
+ {
435
+ "cell_type": "code",
436
+ "execution_count": 9,
437
+ "metadata": {
438
+ "id": "V-G3OCUCgR07"
439
+ },
440
+ "outputs": [],
441
+ "source": []
442
+ },
443
+ {
444
+ "cell_type": "markdown",
445
+ "metadata": {
446
+ "id": "HnngRNTgacYt"
447
+ },
448
+ "source": [
449
+ "### *d. Decide on the sentiment_label based on the popularity score with a get_sentiment function*"
450
+ ]
451
+ },
452
+ {
453
+ "cell_type": "code",
454
+ "execution_count": 10,
455
+ "metadata": {
456
+ "id": "kUtWmr8maZLZ"
457
+ },
458
+ "outputs": [],
459
+ "source": [
460
+ "def get_sentiment(popularity_score):\n",
461
+ " if popularity_score <= 2:\n",
462
+ " return \"negative\"\n",
463
+ " elif popularity_score == 3:\n",
464
+ " return \"neutral\"\n",
465
+ " else:\n",
466
+ " return \"positive\""
467
+ ]
468
+ },
469
+ {
470
+ "cell_type": "markdown",
471
+ "metadata": {
472
+ "id": "HF9F9HIzgT7Z"
473
+ },
474
+ "source": [
475
+ "### *e. ✋🏻🛑⛔️ Run the function to create a \"sentiment_label\" column from \"popularity_score\"*"
476
+ ]
477
+ },
478
+ {
479
+ "cell_type": "code",
480
+ "execution_count": 11,
481
+ "metadata": {
482
+ "id": "tafQj8_7gYCG"
483
+ },
484
+ "outputs": [],
485
+ "source": []
486
+ },
487
+ {
488
+ "cell_type": "markdown",
489
+ "metadata": {
490
+ "id": "T8AdKkmASq9a"
491
+ },
492
+ "source": [
493
+ "## **4.** 📈 Generate synthetic book sales data of 18 months"
494
+ ]
495
+ },
496
+ {
497
+ "cell_type": "markdown",
498
+ "metadata": {
499
+ "id": "OhXbdGD5fH0c"
500
+ },
501
+ "source": [
502
+ "### *a. Create a generate_sales_profile function that would generate sales patterns based on sentiment_label (with some randomness)*"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "code",
507
+ "execution_count": 12,
508
+ "metadata": {
509
+ "id": "qkVhYPXGbgEn"
510
+ },
511
+ "outputs": [],
512
+ "source": [
513
+ "def generate_sales_profile(sentiment):\n",
514
+ " months = pd.date_range(end=datetime.today(), periods=18, freq=\"M\")\n",
515
+ "\n",
516
+ " if sentiment == \"positive\":\n",
517
+ " base = random.randint(200, 300)\n",
518
+ " trend = np.linspace(base, base + random.randint(20, 60), len(months))\n",
519
+ " elif sentiment == \"negative\":\n",
520
+ " base = random.randint(20, 80)\n",
521
+ " trend = np.linspace(base, base - random.randint(10, 30), len(months))\n",
522
+ " else: # neutral\n",
523
+ " base = random.randint(80, 160)\n",
524
+ " trend = np.full(len(months), base + random.randint(-10, 10))\n",
525
+ "\n",
526
+ " seasonality = 10 * np.sin(np.linspace(0, 3 * np.pi, len(months)))\n",
527
+ " noise = np.random.normal(0, 5, len(months))\n",
528
+ " monthly_sales = np.clip(trend + seasonality + noise, a_min=0, a_max=None).astype(int)\n",
529
+ "\n",
530
+ " return list(zip(months.strftime(\"%Y-%m\"), monthly_sales))"
531
+ ]
532
+ },
533
+ {
534
+ "cell_type": "markdown",
535
+ "metadata": {
536
+ "id": "L2ak1HlcgoTe"
537
+ },
538
+ "source": [
539
+ "### *b. Run the function as part of building sales_data*"
540
+ ]
541
+ },
542
+ {
543
+ "cell_type": "code",
544
+ "execution_count": 13,
545
+ "metadata": {
546
+ "id": "SlJ24AUafoDB"
547
+ },
548
+ "outputs": [],
549
+ "source": [
550
+ "sales_data = []\n",
551
+ "for _, row in df_books.iterrows():\n",
552
+ " records = generate_sales_profile(row[\"sentiment_label\"])\n",
553
+ " for month, units in records:\n",
554
+ " sales_data.append({\n",
555
+ " \"title\": row[\"title\"],\n",
556
+ " \"month\": month,\n",
557
+ " \"units_sold\": units,\n",
558
+ " \"sentiment_label\": row[\"sentiment_label\"]\n",
559
+ " })"
560
+ ]
561
+ },
562
+ {
563
+ "cell_type": "markdown",
564
+ "metadata": {
565
+ "id": "4IXZKcCSgxnq"
566
+ },
567
+ "source": [
568
+ "### *c. ✋🏻🛑⛔️ Create a df_sales DataFrame from sales_data*"
569
+ ]
570
+ },
571
+ {
572
+ "cell_type": "code",
573
+ "execution_count": 14,
574
+ "metadata": {
575
+ "id": "wcN6gtiZg-ws"
576
+ },
577
+ "outputs": [],
578
+ "source": []
579
+ },
580
+ {
581
+ "cell_type": "markdown",
582
+ "metadata": {
583
+ "id": "EhIjz9WohAmZ"
584
+ },
585
+ "source": [
586
+ "### *d. Save df_sales as synthetic_sales_data.csv & view first few lines*"
587
+ ]
588
+ },
589
+ {
590
+ "cell_type": "code",
591
+ "execution_count": 15,
592
+ "metadata": {
593
+ "colab": {
594
+ "base_uri": "https://localhost:8080/"
595
+ },
596
+ "id": "MzbZvLcAhGaH",
597
+ "outputId": "c692bb04-7263-4115-a2ba-c72fe0180722"
598
+ },
599
+ "outputs": [
600
+ {
601
+ "output_type": "stream",
602
+ "name": "stdout",
603
+ "text": [
604
+ " title month units_sold sentiment_label\n",
605
+ "0 A Light in the Attic 2024-08 100 neutral\n",
606
+ "1 A Light in the Attic 2024-09 109 neutral\n",
607
+ "2 A Light in the Attic 2024-10 102 neutral\n",
608
+ "3 A Light in the Attic 2024-11 107 neutral\n",
609
+ "4 A Light in the Attic 2024-12 108 neutral\n"
610
+ ]
611
+ }
612
+ ],
613
+ "source": [
614
+ "df_sales.to_csv(\"synthetic_sales_data.csv\", index=False)\n",
615
+ "\n",
616
+ "print(df_sales.head())"
617
+ ]
618
+ },
619
+ {
620
+ "cell_type": "markdown",
621
+ "metadata": {
622
+ "id": "7g9gqBgQMtJn"
623
+ },
624
+ "source": [
625
+ "## **5.** 🎯 Generate synthetic customer reviews"
626
+ ]
627
+ },
628
+ {
629
+ "cell_type": "markdown",
630
+ "metadata": {
631
+ "id": "Gi4y9M9KuDWx"
632
+ },
633
+ "source": [
634
+ "### *a. ✋🏻🛑⛔️ Ask ChatGPT to create a list of 50 distinct generic book review texts for the sentiment labels \"positive\", \"neutral\", and \"negative\" called synthetic_reviews_by_sentiment*"
635
+ ]
636
+ },
637
+ {
638
+ "cell_type": "code",
639
+ "execution_count": 16,
640
+ "metadata": {
641
+ "id": "b3cd2a50"
642
+ },
643
+ "outputs": [],
644
+ "source": [
645
+ "synthetic_reviews_by_sentiment = {\n",
646
+ " \"positive\": [\n",
647
+ " \"A compelling and heartwarming read that stayed with me long after I finished.\",\n",
648
+ " \"Brilliantly written! The characters were unforgettable and the plot was engaging.\",\n",
649
+ " \"One of the best books I've read this year — inspiring and emotionally rich.\",\n",
650
+ " ],\n",
651
+ " \"neutral\": [\n",
652
+ " \"An average book — not great, but not bad either.\",\n",
653
+ " \"Some parts really stood out, others felt a bit flat.\",\n",
654
+ " \"It was okay overall. A decent way to pass the time.\",\n",
655
+ " ],\n",
656
+ " \"negative\": [\n",
657
+ " \"I struggled to get through this one — it just didn’t grab me.\",\n",
658
+ " \"The plot was confusing and the characters felt underdeveloped.\",\n",
659
+ " \"Disappointing. I had high hopes, but they weren't met.\",\n",
660
+ " ]\n",
661
+ "}"
662
+ ]
663
+ },
664
+ {
665
+ "cell_type": "markdown",
666
+ "metadata": {
667
+ "id": "fQhfVaDmuULT"
668
+ },
669
+ "source": [
670
+ "### *b. Generate 10 reviews per book using random sampling from the corresponding 50*"
671
+ ]
672
+ },
673
+ {
674
+ "cell_type": "code",
675
+ "execution_count": 17,
676
+ "metadata": {
677
+ "id": "l2SRc3PjuTGM"
678
+ },
679
+ "outputs": [],
680
+ "source": [
681
+ "review_rows = []\n",
682
+ "for _, row in df_books.iterrows():\n",
683
+ " title = row['title']\n",
684
+ " sentiment_label = row['sentiment_label']\n",
685
+ " review_pool = synthetic_reviews_by_sentiment[sentiment_label]\n",
686
+ " sampled_reviews = random.sample(review_pool, 10)\n",
687
+ " for review_text in sampled_reviews:\n",
688
+ " review_rows.append({\n",
689
+ " \"title\": title,\n",
690
+ " \"sentiment_label\": sentiment_label,\n",
691
+ " \"review_text\": review_text,\n",
692
+ " \"rating\": row['rating'],\n",
693
+ " \"popularity_score\": row['popularity_score']\n",
694
+ " })"
695
+ ]
696
+ },
697
+ {
698
+ "cell_type": "markdown",
699
+ "metadata": {
700
+ "id": "bmJMXF-Bukdm"
701
+ },
702
+ "source": [
703
+ "### *c. Create the final dataframe df_reviews & save it as synthetic_book_reviews.csv*"
704
+ ]
705
+ },
706
+ {
707
+ "cell_type": "code",
708
+ "execution_count": 18,
709
+ "metadata": {
710
+ "id": "ZUKUqZsuumsp"
711
+ },
712
+ "outputs": [],
713
+ "source": [
714
+ "df_reviews = pd.DataFrame(review_rows)\n",
715
+ "df_reviews.to_csv(\"synthetic_book_reviews.csv\", index=False)"
716
+ ]
717
+ },
718
+ {
719
+ "cell_type": "markdown",
720
+ "source": [
721
+ "### *d. Inputs for R*"
722
+ ],
723
+ "metadata": {
724
+ "id": "_602pYUS3gY5"
725
+ }
726
+ },
727
+ {
728
+ "cell_type": "code",
729
+ "execution_count": 19,
730
+ "metadata": {
731
+ "colab": {
732
+ "base_uri": "https://localhost:8080/"
733
+ },
734
+ "id": "3946e521",
735
+ "outputId": "514d7bef-0488-4933-b03c-953b9e8a7f66"
736
+ },
737
+ "outputs": [
738
+ {
739
+ "output_type": "stream",
740
+ "name": "stdout",
741
+ "text": [
742
+ "✅ Wrote synthetic_title_level_features.csv\n",
743
+ "✅ Wrote synthetic_monthly_revenue_series.csv\n"
744
+ ]
745
+ }
746
+ ],
747
+ "source": [
748
+ "import numpy as np\n",
749
+ "\n",
750
+ "def _safe_num(s):\n",
751
+ " return pd.to_numeric(\n",
752
+ " pd.Series(s).astype(str).str.replace(r\"[^0-9.]\", \"\", regex=True),\n",
753
+ " errors=\"coerce\"\n",
754
+ " )\n",
755
+ "\n",
756
+ "# --- Clean book metadata (price/rating) ---\n",
757
+ "df_books_r = df_books.copy()\n",
758
+ "if \"price\" in df_books_r.columns:\n",
759
+ " df_books_r[\"price\"] = _safe_num(df_books_r[\"price\"])\n",
760
+ "if \"rating\" in df_books_r.columns:\n",
761
+ " df_books_r[\"rating\"] = _safe_num(df_books_r[\"rating\"])\n",
762
+ "\n",
763
+ "df_books_r[\"title\"] = df_books_r[\"title\"].astype(str).str.strip()\n",
764
+ "\n",
765
+ "# --- Clean sales ---\n",
766
+ "df_sales_r = df_sales.copy()\n",
767
+ "df_sales_r[\"title\"] = df_sales_r[\"title\"].astype(str).str.strip()\n",
768
+ "df_sales_r[\"month\"] = pd.to_datetime(df_sales_r[\"month\"], errors=\"coerce\")\n",
769
+ "df_sales_r[\"units_sold\"] = _safe_num(df_sales_r[\"units_sold\"])\n",
770
+ "\n",
771
+ "# --- Clean reviews ---\n",
772
+ "df_reviews_r = df_reviews.copy()\n",
773
+ "df_reviews_r[\"title\"] = df_reviews_r[\"title\"].astype(str).str.strip()\n",
774
+ "df_reviews_r[\"sentiment_label\"] = df_reviews_r[\"sentiment_label\"].astype(str).str.lower().str.strip()\n",
775
+ "if \"rating\" in df_reviews_r.columns:\n",
776
+ " df_reviews_r[\"rating\"] = _safe_num(df_reviews_r[\"rating\"])\n",
777
+ "if \"popularity_score\" in df_reviews_r.columns:\n",
778
+ " df_reviews_r[\"popularity_score\"] = _safe_num(df_reviews_r[\"popularity_score\"])\n",
779
+ "\n",
780
+ "# --- Sentiment shares per title (from reviews) ---\n",
781
+ "sent_counts = (\n",
782
+ " df_reviews_r.groupby([\"title\", \"sentiment_label\"])\n",
783
+ " .size()\n",
784
+ " .unstack(fill_value=0)\n",
785
+ ")\n",
786
+ "for lab in [\"positive\", \"neutral\", \"negative\"]:\n",
787
+ " if lab not in sent_counts.columns:\n",
788
+ " sent_counts[lab] = 0\n",
789
+ "\n",
790
+ "sent_counts[\"total_reviews\"] = sent_counts[[\"positive\", \"neutral\", \"negative\"]].sum(axis=1)\n",
791
+ "den = sent_counts[\"total_reviews\"].replace(0, np.nan)\n",
792
+ "sent_counts[\"share_positive\"] = sent_counts[\"positive\"] / den\n",
793
+ "sent_counts[\"share_neutral\"] = sent_counts[\"neutral\"] / den\n",
794
+ "sent_counts[\"share_negative\"] = sent_counts[\"negative\"] / den\n",
795
+ "sent_counts = sent_counts.reset_index()\n",
796
+ "\n",
797
+ "# --- Sales aggregation per title ---\n",
798
+ "sales_by_title = (\n",
799
+ " df_sales_r.dropna(subset=[\"title\"])\n",
800
+ " .groupby(\"title\", as_index=False)\n",
801
+ " .agg(\n",
802
+ " months_observed=(\"month\", \"nunique\"),\n",
803
+ " avg_units_sold=(\"units_sold\", \"mean\"),\n",
804
+ " total_units_sold=(\"units_sold\", \"sum\"),\n",
805
+ " )\n",
806
+ ")\n",
807
+ "\n",
808
+ "# --- Title-level features (join sales + books + sentiment) ---\n",
809
+ "df_title = (\n",
810
+ " sales_by_title\n",
811
+ " .merge(df_books_r[[\"title\", \"price\", \"rating\"]], on=\"title\", how=\"left\")\n",
812
+ " .merge(sent_counts[[\"title\", \"share_positive\", \"share_neutral\", \"share_negative\", \"total_reviews\"]],\n",
813
+ " on=\"title\", how=\"left\")\n",
814
+ ")\n",
815
+ "\n",
816
+ "df_title[\"avg_revenue\"] = df_title[\"avg_units_sold\"] * df_title[\"price\"]\n",
817
+ "df_title[\"total_revenue\"] = df_title[\"total_units_sold\"] * df_title[\"price\"]\n",
818
+ "\n",
819
+ "df_title.to_csv(\"synthetic_title_level_features.csv\", index=False)\n",
820
+ "print(\"✅ Wrote synthetic_title_level_features.csv\")\n",
821
+ "\n",
822
+ "# --- Monthly revenue series (proxy: units_sold * price) ---\n",
823
+ "monthly_rev = (\n",
824
+ " df_sales_r.merge(df_books_r[[\"title\", \"price\"]], on=\"title\", how=\"left\")\n",
825
+ ")\n",
826
+ "monthly_rev[\"revenue\"] = monthly_rev[\"units_sold\"] * monthly_rev[\"price\"]\n",
827
+ "\n",
828
+ "df_monthly = (\n",
829
+ " monthly_rev.dropna(subset=[\"month\"])\n",
830
+ " .groupby(\"month\", as_index=False)[\"revenue\"]\n",
831
+ " .sum()\n",
832
+ " .rename(columns={\"revenue\": \"total_revenue\"})\n",
833
+ " .sort_values(\"month\")\n",
834
+ ")\n",
835
+ "# if revenue is all NA (e.g., missing price), fallback to units_sold as a teaching proxy\n",
836
+ "if df_monthly[\"total_revenue\"].notna().sum() == 0:\n",
837
+ " df_monthly = (\n",
838
+ " df_sales_r.dropna(subset=[\"month\"])\n",
839
+ " .groupby(\"month\", as_index=False)[\"units_sold\"]\n",
840
+ " .sum()\n",
841
+ " .rename(columns={\"units_sold\": \"total_revenue\"})\n",
842
+ " .sort_values(\"month\")\n",
843
+ " )\n",
844
+ "\n",
845
+ "df_monthly[\"month\"] = pd.to_datetime(df_monthly[\"month\"], errors=\"coerce\").dt.strftime(\"%Y-%m-%d\")\n",
846
+ "df_monthly.to_csv(\"synthetic_monthly_revenue_series.csv\", index=False)\n",
847
+ "print(\"✅ Wrote synthetic_monthly_revenue_series.csv\")\n"
848
+ ]
849
+ },
850
+ {
851
+ "cell_type": "markdown",
852
+ "metadata": {
853
+ "id": "RYvGyVfXuo54"
854
+ },
855
+ "source": [
856
+ "### *d. ✋🏻🛑⛔️ View the first few lines*"
857
+ ]
858
+ },
859
+ {
860
+ "cell_type": "code",
861
+ "execution_count": 20,
862
+ "metadata": {
863
+ "colab": {
864
+ "base_uri": "https://localhost:8080/"
865
+ },
866
+ "id": "xfE8NMqOurKo",
867
+ "outputId": "191730ba-d5e2-4df7-97d2-99feb0b704af"
868
+ },
869
+ "outputs": [
870
+ {
871
+ "output_type": "stream",
872
+ "name": "stdout",
873
+ "text": [
874
+ " title sentiment_label \\\n",
875
+ "0 A Light in the Attic neutral \n",
876
+ "1 A Light in the Attic neutral \n",
877
+ "2 A Light in the Attic neutral \n",
878
+ "3 A Light in the Attic neutral \n",
879
+ "4 A Light in the Attic neutral \n",
880
+ "\n",
881
+ " review_text rating popularity_score \n",
882
+ "0 Had potential that went unrealized. Three 3 \n",
883
+ "1 The themes were solid, but not well explored. Three 3 \n",
884
+ "2 It simply lacked that emotional punch. Three 3 \n",
885
+ "3 Serviceable but not something I'd go out of my... Three 3 \n",
886
+ "4 Standard fare with some promise. Three 3 \n"
887
+ ]
888
+ }
889
+ ],
890
+ "source": []
891
+ }
892
+ ],
893
+ "metadata": {
894
+ "colab": {
895
+ "collapsed_sections": [
896
+ "jpASMyIQMaAq",
897
+ "lquNYCbfL9IM",
898
+ "0IWuNpxxYDJF",
899
+ "oCdTsin2Yfp3",
900
+ "T0TOeRC4Yrnn",
901
+ "duI5dv3CZYvF",
902
+ "qMjRKMBQZlJi",
903
+ "p-1Pr2szaqLk",
904
+ "SIaJUGIpaH4V",
905
+ "pY4yCoIuaQqp",
906
+ "n4-TaNTFgPak",
907
+ "HnngRNTgacYt",
908
+ "HF9F9HIzgT7Z",
909
+ "T8AdKkmASq9a",
910
+ "OhXbdGD5fH0c",
911
+ "L2ak1HlcgoTe",
912
+ "4IXZKcCSgxnq",
913
+ "EhIjz9WohAmZ",
914
+ "Gi4y9M9KuDWx",
915
+ "fQhfVaDmuULT",
916
+ "bmJMXF-Bukdm",
917
+ "RYvGyVfXuo54"
918
+ ],
919
+ "provenance": []
920
+ },
921
+ "kernelspec": {
922
+ "display_name": "Python 3",
923
+ "name": "python3"
924
+ },
925
+ "language_info": {
926
+ "name": "python"
927
+ }
928
+ },
929
+ "nbformat": 4,
930
+ "nbformat_minor": 0
931
+ }
2a_Python_Analysis-2.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
R analysis.ipynb ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "75fd9cc6",
6
+ "metadata": {
7
+ "id": "75fd9cc6"
8
+ },
9
+ "source": [
10
+ "# **🤖 Benchmarking & Modeling**"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "id": "fb807724",
16
+ "metadata": {
17
+ "id": "fb807724"
18
+ },
19
+ "source": [
20
+ "## **1.** 📦 Setup"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": null,
26
+ "id": "d40cd131",
27
+ "metadata": {
28
+ "id": "d40cd131"
29
+ },
30
+ "outputs": [],
31
+ "source": [
32
+ "\n",
33
+ "# Uncomment the next line once:\n",
34
+ "install.packages(c(\"readr\",\"dplyr\",\"stringr\",\"tidyr\",\"lubridate\",\"ggplot2\",\"forecast\",\"broom\",\"jsonlite\"), repos=\"https://cloud.r-project.org\")\n",
35
+ "\n",
36
+ "suppressPackageStartupMessages({\n",
37
+ " library(readr)\n",
38
+ " library(dplyr)\n",
39
+ " library(stringr)\n",
40
+ " library(tidyr)\n",
41
+ " library(lubridate)\n",
42
+ " library(ggplot2)\n",
43
+ " library(forecast)\n",
44
+ " library(broom)\n",
45
+ " library(jsonlite)\n",
46
+ "})"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "markdown",
51
+ "id": "f01d02e7",
52
+ "metadata": {
53
+ "id": "f01d02e7"
54
+ },
55
+ "source": [
56
+ "## **2.** ✅️ Load & inspect inputs"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "id": "29e8f6ce",
63
+ "metadata": {
64
+ "colab": {
65
+ "base_uri": "https://localhost:8080/"
66
+ },
67
+ "id": "29e8f6ce",
68
+ "outputId": "5a1bda1c-c58d-43d0-c85e-db5041c8bc49"
69
+ },
70
+ "outputs": [
71
+ {
72
+ "output_type": "stream",
73
+ "name": "stdout",
74
+ "text": [
75
+ "Loaded: 1000 rows (title-level), 18 rows (monthly)\n"
76
+ ]
77
+ }
78
+ ],
79
+ "source": [
80
+ "\n",
81
+ "must_exist <- function(path, label) {\n",
82
+ " if (!file.exists(path)) stop(paste0(\"Missing \", label, \": \", path))\n",
83
+ "}\n",
84
+ "\n",
85
+ "TITLE_PATH <- \"synthetic_title_level_features.csv\"\n",
86
+ "MONTH_PATH <- \"synthetic_monthly_revenue_series.csv\"\n",
87
+ "\n",
88
+ "must_exist(TITLE_PATH, \"TITLE_PATH\")\n",
89
+ "must_exist(MONTH_PATH, \"MONTH_PATH\")\n",
90
+ "\n",
91
+ "df_title <- read_csv(TITLE_PATH, show_col_types = FALSE)\n",
92
+ "df_month <- read_csv(MONTH_PATH, show_col_types = FALSE)\n",
93
+ "\n",
94
+ "cat(\"Loaded:\", nrow(df_title), \"rows (title-level),\", nrow(df_month), \"rows (monthly)\n",
95
+ "\")"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": null,
101
+ "id": "9fd04262",
102
+ "metadata": {
103
+ "colab": {
104
+ "base_uri": "https://localhost:8080/"
105
+ },
106
+ "id": "9fd04262",
107
+ "outputId": "5f031538-96be-4758-904d-9201ec3c3ea7"
108
+ },
109
+ "outputs": [
110
+ {
111
+ "output_type": "stream",
112
+ "name": "stdout",
113
+ "text": [
114
+ "\u001b[90m# A tibble: 1 × 6\u001b[39m\n",
115
+ " n na_avg_revenue na_price na_rating na_share_pos na_share_neg\n",
116
+ " \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m\n",
117
+ "\u001b[90m1\u001b[39m \u001b[4m1\u001b[24m000 0 0 \u001b[4m1\u001b[24m000 0 0\n",
118
+ "Monthly rows after parsing: 18 \n"
119
+ ]
120
+ }
121
+ ],
122
+ "source": [
123
+ "\n",
124
+ "# ---------- helpers ----------\n",
125
+ "safe_num <- function(x) {\n",
126
+ " # strips anything that is not digit or dot\n",
127
+ " suppressWarnings(as.numeric(str_replace_all(as.character(x), \"[^0-9.]\", \"\")))\n",
128
+ "}\n",
129
+ "\n",
130
+ "parse_rating <- function(x) {\n",
131
+ " # Accept: 4, \"4\", \"4.0\", \"4/5\", \"4 out of 5\", \"⭐⭐⭐⭐\", etc.\n",
132
+ " x <- as.character(x)\n",
133
+ " x <- str_replace_all(x, \"⭐\", \"\")\n",
134
+ " x <- str_to_lower(x)\n",
135
+ " x <- str_replace_all(x, \"stars?\", \"\")\n",
136
+ " x <- str_replace_all(x, \"out of\", \"/\")\n",
137
+ " x <- str_replace_all(x, \"\\\\s+\", \"\")\n",
138
+ " x <- str_replace_all(x, \"[^0-9./]\", \"\")\n",
139
+ " suppressWarnings(as.numeric(str_extract(x, \"^[0-9.]+\")))\n",
140
+ "}\n",
141
+ "\n",
142
+ "parse_month <- function(x) {\n",
143
+ " x <- as.character(x)\n",
144
+ " # try YYYY-MM-DD, then YYYY-MM\n",
145
+ " out <- suppressWarnings(ymd(x))\n",
146
+ " if (mean(is.na(out)) > 0.5) out <- suppressWarnings(ymd(paste0(x, \"-01\")))\n",
147
+ " na_idx <- which(is.na(out))\n",
148
+ " if (length(na_idx) > 0) out[na_idx] <- suppressWarnings(ymd(paste0(x[na_idx], \"-01\")))\n",
149
+ " out\n",
150
+ "}\n",
151
+ "\n",
152
+ "# ---------- normalize keys ----------\n",
153
+ "df_title <- df_title %>% mutate(title = str_squish(as.character(title)))\n",
154
+ "df_month <- df_month %>% mutate(month = as.character(month))\n",
155
+ "\n",
156
+ "# ---------- parse numeric columns defensively ----------\n",
157
+ "need_cols_title <- c(\"title\",\"avg_revenue\",\"total_revenue\",\"price\",\"rating\",\"share_positive\",\"share_negative\",\"share_neutral\")\n",
158
+ "missing_title <- setdiff(need_cols_title, names(df_title))\n",
159
+ "if (length(missing_title) > 0) stop(paste0(\"df_title missing columns: \", paste(missing_title, collapse=\", \")))\n",
160
+ "\n",
161
+ "df_title <- df_title %>%\n",
162
+ " mutate(\n",
163
+ " avg_revenue = safe_num(avg_revenue),\n",
164
+ " total_revenue = safe_num(total_revenue),\n",
165
+ " price = safe_num(price),\n",
166
+ " rating = parse_rating(rating),\n",
167
+ " share_positive = safe_num(share_positive),\n",
168
+ " share_negative = safe_num(share_negative),\n",
169
+ " share_neutral = safe_num(share_neutral)\n",
170
+ " )\n",
171
+ "\n",
172
+ "# basic sanity stats\n",
173
+ "hyg <- df_title %>%\n",
174
+ " summarise(\n",
175
+ " n = n(),\n",
176
+ " na_avg_revenue = sum(is.na(avg_revenue)),\n",
177
+ " na_price = sum(is.na(price)),\n",
178
+ " na_rating = sum(is.na(rating)),\n",
179
+ " na_share_pos = sum(is.na(share_positive)),\n",
180
+ " na_share_neg = sum(is.na(share_negative))\n",
181
+ " )\n",
182
+ "\n",
183
+ "print(hyg)\n",
184
+ "\n",
185
+ "# monthly parsing\n",
186
+ "need_cols_month <- c(\"month\",\"total_revenue\")\n",
187
+ "missing_month <- setdiff(need_cols_month, names(df_month))\n",
188
+ "if (length(missing_month) > 0) stop(paste0(\"df_month missing columns: \", paste(missing_month, collapse=\", \")))\n",
189
+ "\n",
190
+ "df_month2 <- df_month %>%\n",
191
+ " mutate(\n",
192
+ " month = parse_month(month),\n",
193
+ " total_revenue = safe_num(total_revenue)\n",
194
+ " ) %>%\n",
195
+ " filter(!is.na(month)) %>%\n",
196
+ " arrange(month)\n",
197
+ "\n",
198
+ "cat(\"Monthly rows after parsing:\", nrow(df_month2), \"\\n\")"
199
+ ]
200
+ },
201
+ {
202
+ "cell_type": "markdown",
203
+ "id": "b8971bc4",
204
+ "metadata": {
205
+ "id": "b8971bc4"
206
+ },
207
+ "source": [
208
+ "## **3.** 💾 Folder for R outputs for Hugging Face"
209
+ ]
210
+ },
211
+ {
212
+ "cell_type": "code",
213
+ "execution_count": null,
214
+ "id": "dfaa06b1",
215
+ "metadata": {
216
+ "colab": {
217
+ "base_uri": "https://localhost:8080/"
218
+ },
219
+ "id": "dfaa06b1",
220
+ "outputId": "73f6437a-39f4-4968-f88a-99f10a3fd8ae"
221
+ },
222
+ "outputs": [
223
+ {
224
+ "output_type": "stream",
225
+ "name": "stdout",
226
+ "text": [
227
+ "R outputs will be written to: /content/artifacts/r \n"
228
+ ]
229
+ }
230
+ ],
231
+ "source": [
232
+ "\n",
233
+ "ART_DIR <- \"artifacts\"\n",
234
+ "R_FIG_DIR <- file.path(ART_DIR, \"r\", \"figures\")\n",
235
+ "R_TAB_DIR <- file.path(ART_DIR, \"r\", \"tables\")\n",
236
+ "\n",
237
+ "dir.create(R_FIG_DIR, recursive = TRUE, showWarnings = FALSE)\n",
238
+ "dir.create(R_TAB_DIR, recursive = TRUE, showWarnings = FALSE)\n",
239
+ "\n",
240
+ "cat(\"R outputs will be written to:\", normalizePath(file.path(ART_DIR, \"r\"), winslash = \"/\"), \"\n",
241
+ "\")"
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "markdown",
246
+ "id": "f880c72d",
247
+ "metadata": {
248
+ "id": "f880c72d"
249
+ },
250
+ "source": [
251
+ "## **4.** 🔮 Forecast book sales benchmarking with `accuracy()`"
252
+ ]
253
+ },
254
+ {
255
+ "cell_type": "markdown",
256
+ "source": [
257
+ "We benchmark **three** models on a holdout window (last *h* months):\n",
258
+ "- ARIMA + Fourier (seasonality upgrade)\n",
259
+ "- ETS\n",
260
+ "- Naive baseline\n",
261
+ "\n",
262
+ "Then we export:\n",
263
+ "- `accuracy_table.csv`\n",
264
+ "- `forecast_compare.png`\n",
265
+ "- `rmse_comparison.png`"
266
+ ],
267
+ "metadata": {
268
+ "id": "R0JZlzKegmzW"
269
+ },
270
+ "id": "R0JZlzKegmzW"
271
+ },
272
+ {
273
+ "cell_type": "code",
274
+ "execution_count": null,
275
+ "id": "62e87992",
276
+ "metadata": {
277
+ "colab": {
278
+ "base_uri": "https://localhost:8080/"
279
+ },
280
+ "id": "62e87992",
281
+ "outputId": "73b36487-a25d-4bb9-cf80-8d5a654a2f0d"
282
+ },
283
+ "outputs": [
284
+ {
285
+ "output_type": "stream",
286
+ "name": "stdout",
287
+ "text": [
288
+ "✅ Saved: artifacts/r/tables/accuracy_table.csv\n",
289
+ "✅ Saved: artifacts/r/figures/rmse_comparison.png\n"
290
+ ]
291
+ },
292
+ {
293
+ "output_type": "display_data",
294
+ "data": {
295
+ "text/html": [
296
+ "<strong>agg_record_872216040:</strong> 2"
297
+ ],
298
+ "text/markdown": "**agg_record_872216040:** 2",
299
+ "text/latex": "\\textbf{agg\\textbackslash{}\\_record\\textbackslash{}\\_872216040:} 2",
300
+ "text/plain": [
301
+ "agg_record_872216040 \n",
302
+ " 2 "
303
+ ]
304
+ },
305
+ "metadata": {}
306
+ },
307
+ {
308
+ "output_type": "stream",
309
+ "name": "stdout",
310
+ "text": [
311
+ "✅ Saved: artifacts/r/figures/forecast_compare.png\n"
312
+ ]
313
+ }
314
+ ],
315
+ "source": [
316
+ "\n",
317
+ "# Build monthly ts\n",
318
+ "start_year <- year(min(df_month2$month, na.rm = TRUE))\n",
319
+ "start_mon <- month(min(df_month2$month, na.rm = TRUE))\n",
320
+ "\n",
321
+ "y <- ts(df_month2$total_revenue, frequency = 12, start = c(start_year, start_mon))\n",
322
+ "\n",
323
+ "# holdout size: min(6, 20% of series), at least 1\n",
324
+ "h_test <- min(6, max(1, floor(length(y) / 5)))\n",
325
+ "train_ts <- head(y, length(y) - h_test)\n",
326
+ "test_ts <- tail(y, h_test)\n",
327
+ "\n",
328
+ "# Model A: ARIMA + Fourier\n",
329
+ "K <- 2\n",
330
+ "xreg_train <- fourier(train_ts, K = K)\n",
331
+ "fit_arima <- auto.arima(train_ts, xreg = xreg_train)\n",
332
+ "xreg_future <- fourier(train_ts, K = K, h = h_test)\n",
333
+ "fc_arima <- forecast(fit_arima, xreg = xreg_future, h = h_test)\n",
334
+ "\n",
335
+ "# Model B: ETS\n",
336
+ "fit_ets <- ets(train_ts)\n",
337
+ "fc_ets <- forecast(fit_ets, h = h_test)\n",
338
+ "\n",
339
+ "# Model C: Naive baseline\n",
340
+ "fc_naive <- naive(train_ts, h = h_test)\n",
341
+ "\n",
342
+ "# accuracy() tables\n",
343
+ "acc_arima <- as.data.frame(accuracy(fc_arima, test_ts))\n",
344
+ "acc_ets <- as.data.frame(accuracy(fc_ets, test_ts))\n",
345
+ "acc_naive <- as.data.frame(accuracy(fc_naive, test_ts))\n",
346
+ "\n",
347
+ "accuracy_tbl <- bind_rows(\n",
348
+ " acc_arima %>% mutate(model = \"ARIMA+Fourier\"),\n",
349
+ " acc_ets %>% mutate(model = \"ETS\"),\n",
350
+ " acc_naive %>% mutate(model = \"Naive\")\n",
351
+ ") %>% relocate(model)\n",
352
+ "\n",
353
+ "write_csv(accuracy_tbl, file.path(R_TAB_DIR, \"accuracy_table.csv\"))\n",
354
+ "cat(\"✅ Saved: artifacts/r/tables/accuracy_table.csv\\n\")\n",
355
+ "\n",
356
+ "# RMSE bar chart\n",
357
+ "p_rmse <- ggplot(accuracy_tbl, aes(x = reorder(model, RMSE), y = RMSE)) +\n",
358
+ " geom_col() +\n",
359
+ " coord_flip() +\n",
360
+ " labs(title = \"Forecast model comparison (RMSE on holdout)\", x = \"\", y = \"RMSE\") +\n",
361
+ " theme_minimal()\n",
362
+ "\n",
363
+ "ggsave(file.path(R_FIG_DIR, \"rmse_comparison.png\"), p_rmse, width = 8, height = 4, dpi = 160)\n",
364
+ "cat(\"✅ Saved: artifacts/r/figures/rmse_comparison.png\\n\")\n",
365
+ "\n",
366
+ "# Side-by-side forecast plots (simple, no extra deps)\n",
367
+ "png(file.path(R_FIG_DIR, \"forecast_compare.png\"), width = 1200, height = 500)\n",
368
+ "par(mfrow = c(1, 3))\n",
369
+ "plot(fc_arima, main = \"ARIMA + Fourier\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
370
+ "plot(fc_ets, main = \"ETS\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
371
+ "plot(fc_naive, main = \"Naive\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
372
+ "dev.off()\n",
373
+ "cat(\"✅ Saved: artifacts/r/figures/forecast_compare.png\\n\")"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "markdown",
378
+ "id": "30bc017b",
379
+ "metadata": {
380
+ "id": "30bc017b"
381
+ },
382
+ "source": [
383
+ "## **5.** 💾 Some R metadata for Hugging Face"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": null,
389
+ "id": "645cb12b",
390
+ "metadata": {
391
+ "colab": {
392
+ "base_uri": "https://localhost:8080/"
393
+ },
394
+ "id": "645cb12b",
395
+ "outputId": "c00c26da-7d27-4c78-a296-aa33807495d4"
396
+ },
397
+ "outputs": [
398
+ {
399
+ "output_type": "stream",
400
+ "name": "stdout",
401
+ "text": [
402
+ "✅ Saved: artifacts/r/tables/r_meta.json\n",
403
+ "DONE. R artifacts written to: artifacts/r \n"
404
+ ]
405
+ }
406
+ ],
407
+ "source": [
408
+ "# =========================================================\n",
409
+ "# Metadata export (aligned with current notebook objects)\n",
410
+ "# =========================================================\n",
411
+ "\n",
412
+ "meta <- list(\n",
413
+ "\n",
414
+ " # ---------------------------\n",
415
+ " # Dataset footprint\n",
416
+ " # ---------------------------\n",
417
+ " n_titles = nrow(df_title),\n",
418
+ " n_months = nrow(df_month2),\n",
419
+ "\n",
420
+ " # ---------------------------\n",
421
+ " # Forecasting info\n",
422
+ " # (only if these objects exist in your forecasting section)\n",
423
+ " # ---------------------------\n",
424
+ " forecasting = list(\n",
425
+ " holdout_h = h_test,\n",
426
+ " arima_order = forecast::arimaorder(fit_arima),\n",
427
+ " ets_method = fit_ets$method\n",
428
+ " )\n",
429
+ ")\n",
430
+ "\n",
431
+ "jsonlite::write_json(\n",
432
+ " meta,\n",
433
+ " path = file.path(R_TAB_DIR, \"r_meta.json\"),\n",
434
+ " pretty = TRUE,\n",
435
+ " auto_unbox = TRUE\n",
436
+ ")\n",
437
+ "\n",
438
+ "cat(\"✅ Saved: artifacts/r/tables/r_meta.json\\n\")\n",
439
+ "cat(\"DONE. R artifacts written to:\", file.path(ART_DIR, \"r\"), \"\\n\")\n"
440
+ ]
441
+ }
442
+ ],
443
+ "metadata": {
444
+ "colab": {
445
+ "provenance": [],
446
+ "collapsed_sections": [
447
+ "f01d02e7",
448
+ "b8971bc4",
449
+ "f880c72d",
450
+ "30bc017b"
451
+ ]
452
+ },
453
+ "kernelspec": {
454
+ "name": "ir",
455
+ "display_name": "R"
456
+ },
457
+ "language_info": {
458
+ "name": "R"
459
+ }
460
+ },
461
+ "nbformat": 4,
462
+ "nbformat_minor": 5
463
+ }