atascioglu committed on
Commit
cf1b7a0
·
verified ·
1 Parent(s): d3d019c

Upload 10 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ background_top.png filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10-slim
2
+
3
+ ENV DEBIAN_FRONTEND=noninteractive
4
+ ENV PYTHONDONTWRITEBYTECODE=1
5
+ ENV PYTHONUNBUFFERED=1
6
+
7
+ ENV GRADIO_SERVER_NAME=0.0.0.0
8
+ ENV GRADIO_SERVER_PORT=7860
9
+
10
+ WORKDIR /app
11
+ COPY . /app
12
+
13
+ # Python deps (from requirements.txt)
14
+ RUN pip install --no-cache-dir -r requirements.txt
15
+
16
+ # Notebook execution deps
17
+ RUN pip install --no-cache-dir notebook ipykernel papermill
18
+
19
+ # Pre-install packages the notebooks use via !pip install
20
+ RUN pip install --no-cache-dir textblob faker vaderSentiment transformers
21
+
22
+ RUN python -m ipykernel install --user --name python3 --display-name "Python 3"
23
+
24
+ EXPOSE 7860
25
+
26
+ CMD ["python", "app.py"]
README.md CHANGED
@@ -1,10 +1,11 @@
1
  ---
2
- title: SE21AppTemplate
3
- emoji: 🔥
4
- colorFrom: purple
5
- colorTo: red
6
  sdk: docker
7
  pinned: false
 
8
  ---
9
 
10
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: AIBDM 2026 Workshop App
3
+ emoji: 📊
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: docker
7
  pinned: false
8
+ short_description: AI-enhanced analytics dashboard for ESCP AIBDM course
9
  ---
10
 
11
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,731 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import json
4
+ import time
5
+ import traceback
6
+ from pathlib import Path
7
+ from typing import Dict, Any, List, Tuple
8
+
9
+ import pandas as pd
10
+ import gradio as gr
11
+ import papermill as pm
12
+ import plotly.graph_objects as go
13
+
14
+ # Optional LLM (HuggingFace Inference API)
15
+ try:
16
+ from huggingface_hub import InferenceClient
17
+ except Exception:
18
+ InferenceClient = None
19
+
20
+ # =========================================================
21
+ # CONFIG
22
+ # =========================================================
23
+
24
+ BASE_DIR = Path(__file__).resolve().parent
25
+
26
+ NB1 = os.environ.get("NB1", "datacreation.ipynb").strip()
27
+ NB2 = os.environ.get("NB2", "pythonanalysis.ipynb").strip()
28
+
29
+ RUNS_DIR = BASE_DIR / "runs"
30
+ ART_DIR = BASE_DIR / "artifacts"
31
+ PY_FIG_DIR = ART_DIR / "py" / "figures"
32
+ PY_TAB_DIR = ART_DIR / "py" / "tables"
33
+
34
+ PAPERMILL_TIMEOUT = int(os.environ.get("PAPERMILL_TIMEOUT", "1800"))
35
+ MAX_PREVIEW_ROWS = int(os.environ.get("MAX_FILE_PREVIEW_ROWS", "50"))
36
+ MAX_LOG_CHARS = int(os.environ.get("MAX_LOG_CHARS", "8000"))
37
+
38
+ HF_API_KEY = os.environ.get("HF_API_KEY", "").strip()
39
+ MODEL_NAME = os.environ.get("MODEL_NAME", "deepseek-ai/DeepSeek-R1").strip()
40
+ HF_PROVIDER = os.environ.get("HF_PROVIDER", "novita").strip()
41
+
42
+ LLM_ENABLED = bool(HF_API_KEY) and InferenceClient is not None
43
+ llm_client = (
44
+ InferenceClient(provider=HF_PROVIDER, api_key=HF_API_KEY)
45
+ if LLM_ENABLED
46
+ else None
47
+ )
48
+
49
+ # =========================================================
50
+ # HELPERS
51
+ # =========================================================
52
+
53
def ensure_dirs():
    """Create the run/artifact directory tree if it does not already exist."""
    for directory in (RUNS_DIR, ART_DIR, PY_FIG_DIR, PY_TAB_DIR):
        directory.mkdir(parents=True, exist_ok=True)
56
+
57
def stamp():
    """Return the current local time formatted as ``YYYYmmdd-HHMMSS``."""
    now = time.localtime()
    return time.strftime("%Y%m%d-%H%M%S", now)
59
+
60
def tail(text: str, n: int = MAX_LOG_CHARS) -> str:
    """Return at most the last *n* characters of *text* ('' for falsy input)."""
    if not text:
        return ""
    return text[-n:]
62
+
63
def _ls(dir_path: Path, exts: Tuple[str, ...]) -> List[str]:
    """Return sorted file names in *dir_path* whose suffix (lower-cased) is in *exts*.

    A missing or non-directory path yields an empty list.
    """
    if not dir_path.is_dir():
        return []
    names = [
        entry.name
        for entry in dir_path.iterdir()
        if entry.is_file() and entry.suffix.lower() in exts
    ]
    names.sort()
    return names
67
+
68
def _read_csv(path: Path) -> pd.DataFrame:
    """Load at most MAX_PREVIEW_ROWS rows of a CSV for UI preview."""
    preview = pd.read_csv(path, nrows=MAX_PREVIEW_ROWS)
    return preview
70
+
71
def _read_json(path: Path):
    """Parse a UTF-8 encoded JSON file and return the decoded object."""
    with path.open(encoding="utf-8") as handle:
        return json.load(handle)
74
+
75
def artifacts_index() -> Dict[str, Any]:
    """Describe which figure/table artifacts currently exist on disk."""
    figures = _ls(PY_FIG_DIR, (".png", ".jpg", ".jpeg"))
    tables = _ls(PY_TAB_DIR, (".csv", ".json"))
    return {"python": {"figures": figures, "tables": tables}}
82
+
83
+ # =========================================================
84
+ # PIPELINE RUNNERS
85
+ # =========================================================
86
+
87
def run_notebook(nb_name: str) -> str:
    """Execute the notebook *nb_name* (relative to BASE_DIR) with papermill.

    The executed copy is written into RUNS_DIR under a timestamped name so
    earlier runs are never overwritten.  Returns a status string for the UI
    log; papermill execution errors propagate to the caller.
    """
    ensure_dirs()
    nb_in = BASE_DIR / nb_name
    if not nb_in.exists():
        # Missing notebook is reported as a string rather than raised so the
        # Gradio log box can display it directly.
        return f"ERROR: {nb_name} not found."
    nb_out = RUNS_DIR / f"run_{stamp()}_{nb_name}"
    pm.execute_notebook(
        input_path=str(nb_in),
        output_path=str(nb_out),
        cwd=str(BASE_DIR),  # notebooks resolve relative paths from the app root
        log_output=True,
        progress_bar=False,
        request_save_on_cell_execute=True,  # keep partial output if a later cell fails
        execution_timeout=PAPERMILL_TIMEOUT,  # seconds; presumably per-cell (nbclient) — confirm
    )
    return f"Executed {nb_name}"
103
+
104
+
105
def run_datacreation() -> str:
    """Run the data-creation notebook and report the CSVs now present in /app."""
    try:
        status = run_notebook(NB1)
        produced = sorted(f.name for f in BASE_DIR.glob("*.csv"))
        listing = "\n".join(f" - {name}" for name in produced)
        return f"OK {status}\n\nCSVs now in /app:\n" + listing
    except Exception as exc:
        # Surface the failure in the UI log instead of crashing the handler.
        return f"FAILED {exc}\n\n{traceback.format_exc()[-2000:]}"
112
+
113
+
114
def run_pythonanalysis() -> str:
    """Run the analysis notebook and summarise the artifacts it produced."""
    try:
        status = run_notebook(NB2)
        produced = artifacts_index()["python"]
        figure_list = ", ".join(produced["figures"]) or "(none)"
        table_list = ", ".join(produced["tables"]) or "(none)"
        return f"OK {status}\n\nFigures: {figure_list}\nTables: {table_list}"
    except Exception as exc:
        # Surface the failure in the UI log instead of crashing the handler.
        return f"FAILED {exc}\n\n{traceback.format_exc()[-2000:]}"
127
+
128
+
129
def run_full_pipeline() -> str:
    """Run both notebooks in order and return a combined log string.

    Fix over the original: step 2 is now skipped when step 1 fails.  The
    analysis notebook depends on the CSVs produced by the data-creation
    step, so running it anyway only burned the (long) papermill timeout and
    appended misleading cascade errors to the log.
    """
    def banner(text: str) -> List[str]:
        # Visual separator used around each step heading in the log.
        return ["=" * 50, text, "=" * 50]

    logs: List[str] = []
    logs += banner("STEP 1/2: Data Creation (web scraping + synthetic data)")
    step1 = run_datacreation()
    logs.append(step1)
    logs.append("")
    logs += banner("STEP 2/2: Python Analysis (sentiment, ARIMA, dashboard)")

    # run_datacreation reports failures as "FAILED ..." or "OK ERROR: ..."
    # (notebook missing); check only the first line to avoid false matches
    # deeper in the log text.
    first_line = step1.splitlines()[0] if step1 else ""
    if first_line.startswith("FAILED") or "ERROR:" in first_line:
        logs.append(
            "SKIPPED: step 1 did not complete, so the analysis notebook "
            "was not run. Fix step 1 and retry."
        )
    else:
        logs.append(run_pythonanalysis())
    return "\n".join(logs)
141
+
142
+
143
+ # =========================================================
144
+ # GALLERY LOADERS
145
+ # =========================================================
146
+
147
def _load_all_figures() -> List[Tuple[str, str]]:
    """Return (filepath, caption) pairs for every PNG figure, for gr.Gallery."""
    return [
        (str(png), png.stem.replace("_", " ").title())
        for png in sorted(PY_FIG_DIR.glob("*.png"))
    ]
153
+
154
+
155
def _load_table_safe(path: Path) -> pd.DataFrame:
    """Load a CSV/JSON artifact as a DataFrame.

    Never raises: any load failure is returned as a one-row frame with an
    "error" column so the UI always has something to display.
    """
    try:
        if path.suffix != ".json":
            return _read_csv(path)
        payload = _read_json(path)
        if isinstance(payload, dict):
            payload = [payload]  # a lone object becomes a single-row frame
        return pd.DataFrame(payload)
    except Exception as exc:
        return pd.DataFrame([{"error": str(exc)}])
165
+
166
+
167
def refresh_gallery():
    """Handler for the Gallery refresh: figures, table choices, first table preview."""
    figures = _load_all_figures()
    tables = list(artifacts_index()["python"]["tables"])

    selected = tables[0] if tables else None
    if selected:
        preview = _load_table_safe(PY_TAB_DIR / selected)
    else:
        preview = pd.DataFrame()

    dropdown_state = gr.update(choices=tables, value=selected)
    return (figures if figures else [], dropdown_state, preview)
183
+
184
+
185
def on_table_select(choice: str):
    """Load the dropdown-selected artifact table for preview."""
    if not choice:
        return pd.DataFrame([{"hint": "Select a table above."}])
    target = PY_TAB_DIR / choice
    if target.exists():
        return _load_table_safe(target)
    return pd.DataFrame([{"error": f"File not found: {choice}"}])
192
+
193
+
194
+ # =========================================================
195
+ # KPI LOADER
196
+ # =========================================================
197
+
198
def load_kpis() -> Dict[str, Any]:
    """Read kpis.json from either artifact directory; {} when absent or unreadable."""
    candidates = (PY_TAB_DIR / "kpis.json", PY_FIG_DIR / "kpis.json")
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return _read_json(candidate)
        except Exception:
            # Best-effort: a corrupt file falls through to the next candidate.
            pass
    return {}
206
+
207
+
208
+ # =========================================================
209
+ # AI DASHBOARD -- LLM picks what to display
210
+ # =========================================================
211
+
212
+ DASHBOARD_SYSTEM = """You are an AI dashboard assistant for a book-sales analytics app.
213
+ The user asks questions or requests about their data. You have access to pre-computed
214
+ artifacts from a Python analysis pipeline.
215
+
216
+ AVAILABLE ARTIFACTS (only reference ones that exist):
217
+ {artifacts_json}
218
+
219
+ KPI SUMMARY: {kpis_json}
220
+
221
+ YOUR JOB:
222
+ 1. Answer the user's question conversationally using the KPIs and your knowledge of the artifacts.
223
+ 2. At the END of your response, output a JSON block (fenced with ```json ... ```) that tells
224
+ the dashboard which artifact to display. The JSON must have this shape:
225
+ {{"show": "figure"|"table"|"none", "scope": "python", "filename": "..."}}
226
+
227
+ - Use "show": "figure" to display a chart image.
228
+ - Use "show": "table" to display a CSV/JSON table.
229
+ - Use "show": "none" if no artifact is relevant.
230
+
231
+ RULES:
232
+ - If the user asks about sales trends or forecasting by title, show sales_trends or arima figures.
233
+ - If the user asks about sentiment, show sentiment figure or sentiment_counts table.
234
+ - If the user asks about forecast accuracy or ARIMA, show arima figures.
235
+ - If the user asks about top sellers, show top_titles_by_units_sold.csv.
236
+ - If the user asks a general data question, pick the most relevant artifact.
237
+ - Keep your answer concise (2-4 sentences), then the JSON block.
238
+ """
239
+
240
# Fenced ```json ... ``` directive block the LLM is asked to append.
JSON_BLOCK_RE = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
# Looser fallback: any flat brace-delimited object mentioning "show".
FALLBACK_JSON_RE = re.compile(r"\{[^{}]*\"show\"[^{}]*\}", re.DOTALL)


def _parse_display_directive(text: str) -> Dict[str, str]:
    """Extract the display directive JSON from an LLM reply.

    Tries the fenced ```json block first, then the bare-object fallback.
    Returns ``{"show": "none"}`` when nothing parseable is found.
    """
    for pattern, group in ((JSON_BLOCK_RE, 1), (FALLBACK_JSON_RE, 0)):
        match = pattern.search(text)
        if match is None:
            continue
        try:
            return json.loads(match.group(group))
        except json.JSONDecodeError:
            continue
    return {"show": "none"}
258
+
259
+
260
def _clean_response(text: str) -> str:
    """Remove the fenced JSON directive block before the reply is shown to the user."""
    without_directive = JSON_BLOCK_RE.sub("", text)
    return without_directive.strip()
263
+
264
+
265
def ai_chat(user_msg: str, history: list):
    """Chat handler for the AI Dashboard tab.

    Returns (new_history, cleared_input, chart_or_None, table_or_None).
    When an LLM is configured it answers via the Inference API and parses a
    display directive out of the reply; otherwise (or on LLM error) it uses
    the keyword fallback.
    """
    # Empty/whitespace message: leave history untouched and clear nothing.
    if not user_msg or not user_msg.strip():
        return history, "", None, None

    idx = artifacts_index()
    kpis = load_kpis()

    if not LLM_ENABLED:
        reply, directive = _keyword_fallback(user_msg, idx, kpis)
    else:
        system = DASHBOARD_SYSTEM.format(
            artifacts_json=json.dumps(idx, indent=2),
            kpis_json=json.dumps(kpis, indent=2) if kpis else "(no KPIs yet, run the pipeline first)",
        )
        msgs = [{"role": "system", "content": system}]
        # Keep the context small: only the last 6 chat entries are replayed.
        for entry in (history or [])[-6:]:
            msgs.append(entry)
        msgs.append({"role": "user", "content": user_msg})

        try:
            r = llm_client.chat_completion(
                model=MODEL_NAME,
                messages=msgs,
                temperature=0.3,
                max_tokens=600,
                stream=False,
            )
            # Provider responses may be dict-like or attribute-style objects.
            raw = (
                r["choices"][0]["message"]["content"]
                if isinstance(r, dict)
                else r.choices[0].message.content
            )
            directive = _parse_display_directive(raw)
            reply = _clean_response(raw)
        except Exception as e:
            # Any LLM failure degrades gracefully to the keyword matcher.
            reply = f"LLM error: {e}. Falling back to keyword matching."
            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
            reply += "\n\n" + reply_fb

    # Resolve artifacts — build interactive Plotly charts when possible
    chart_out = None
    tab_out = None
    show = directive.get("show", "none")
    fname = directive.get("filename", "")
    chart_name = directive.get("chart", "")

    # Interactive chart builders keyed by name
    chart_builders = {
        "sales": build_sales_chart,
        "sentiment": build_sentiment_chart,
        "top_sellers": build_top_sellers_chart,
    }

    # "chart" (set by the keyword fallback) takes priority over "show"/"filename".
    if chart_name and chart_name in chart_builders:
        chart_out = chart_builders[chart_name]()
    elif show == "figure" and fname:
        # Fallback: try to match filename to a chart builder
        if "sales_trend" in fname:
            chart_out = build_sales_chart()
        elif "sentiment" in fname:
            chart_out = build_sentiment_chart()
        elif "arima" in fname or "forecast" in fname:
            chart_out = build_sales_chart()  # closest interactive equivalent
        else:
            chart_out = _empty_chart(f"No interactive chart for {fname}")

    if show == "table" and fname:
        fp = PY_TAB_DIR / fname
        if fp.exists():
            tab_out = _load_table_safe(fp)
        else:
            reply += f"\n\n*(Could not find table: {fname})*"

    # History uses the messages-format expected by gr.Chatbot dicts.
    new_history = (history or []) + [
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": reply},
    ]

    return new_history, "", chart_out, tab_out
345
+
346
+
347
def _keyword_fallback(msg: str, idx: Dict, kpis: Dict) -> Tuple[str, Dict]:
    """Simple keyword matcher when LLM is unavailable.

    Returns (reply_text, display_directive).  The directive either names an
    interactive chart ("chart" key) or a table artifact ("filename" key);
    the first matching keyword group below wins.
    """
    msg_lower = msg.lower()

    # Nothing generated yet: point the user at the pipeline tab.
    if not idx["python"]["figures"] and not idx["python"]["tables"]:
        return (
            "No artifacts found yet. Please run the pipeline first (Tab 1), "
            "then come back here to explore the results.",
            {"show": "none"},
        )

    # One-line KPI summary appended to every answer when KPIs exist.
    kpi_text = ""
    if kpis:
        total = kpis.get("total_units_sold", 0)
        kpi_text = (
            f"Quick summary: **{kpis.get('n_titles', '?')}** book titles across "
            f"**{kpis.get('n_months', '?')}** months, with **{total:,.0f}** total units sold."
        )

    if any(w in msg_lower for w in ["trend", "sales trend", "monthly sale"]):
        return (
            f"Here are the sales trends. {kpi_text}",
            {"show": "figure", "chart": "sales"},
        )

    if any(w in msg_lower for w in ["sentiment", "review", "positive", "negative"]):
        return (
            f"Here is the sentiment distribution across sampled book titles. {kpi_text}",
            {"show": "figure", "chart": "sentiment"},
        )

    # No dedicated interactive forecast chart; sales trends is the stand-in.
    if any(w in msg_lower for w in ["arima", "forecast", "predict"]):
        return (
            f"Here are the sales trends and forecasts. {kpi_text}",
            {"show": "figure", "chart": "sales"},
        )

    if any(w in msg_lower for w in ["top", "best sell", "popular", "rank"]):
        return (
            f"Here are the top-selling titles by units sold. {kpi_text}",
            {"show": "table", "scope": "python", "filename": "top_titles_by_units_sold.csv"},
        )

    if any(w in msg_lower for w in ["price", "pricing", "decision"]):
        return (
            f"Here are the pricing decisions. {kpi_text}",
            {"show": "table", "scope": "python", "filename": "pricing_decisions.csv"},
        )

    if any(w in msg_lower for w in ["dashboard", "overview", "summary", "kpi"]):
        return (
            f"Dashboard overview: {kpi_text}\n\nAsk me about sales trends, sentiment, forecasts, "
            "pricing, or top sellers to see specific visualizations.",
            {"show": "table", "scope": "python", "filename": "df_dashboard.csv"},
        )

    # Default
    return (
        f"I can show you various analyses. {kpi_text}\n\n"
        "Try asking about: **sales trends**, **sentiment**, **ARIMA forecasts**, "
        "**pricing decisions**, **top sellers**, or **dashboard overview**.",
        {"show": "none"},
    )
410
+
411
+
412
+ # =========================================================
413
+ # KPI CARDS (BubbleBusters style)
414
+ # =========================================================
415
+
416
def render_kpi_cards() -> str:
    """Render the KPI summary as a self-contained HTML card grid.

    Known KPI keys get a dedicated icon/colour; any extra keys found in
    kpis.json are rendered with a generic card.  Returns a placeholder card
    when no KPIs exist yet.
    """
    kpis = load_kpis()
    if not kpis:
        # Placeholder shown before the pipeline has produced kpis.json.
        return (
            '<div style="background:rgba(255,255,255,.65);backdrop-filter:blur(16px);'
            'border-radius:20px;padding:28px;text-align:center;'
            'border:1.5px solid rgba(255,255,255,.7);'
            'box-shadow:0 8px 32px rgba(124,92,191,.08);">'
            '<div style="font-size:36px;margin-bottom:10px;">📊</div>'
            '<div style="color:#a48de8;font-size:14px;'
            'font-weight:800;margin-bottom:6px;">No data yet</div>'
            '<div style="color:#9d8fc4;font-size:12px;">'
            'Run the pipeline to populate these cards.</div>'
            '</div>'
        )

    def card(icon, label, value, colour):
        # One glassmorphism-style KPI card; colour is the top accent bar.
        return f"""
        <div style="background:rgba(255,255,255,.72);backdrop-filter:blur(16px);
        border-radius:20px;padding:18px 14px 16px;text-align:center;
        border:1.5px solid rgba(255,255,255,.8);
        box-shadow:0 4px 16px rgba(124,92,191,.08);
        border-top:3px solid {colour};">
        <div style="font-size:26px;margin-bottom:7px;line-height:1;">{icon}</div>
        <div style="color:#9d8fc4;font-size:9.5px;text-transform:uppercase;
        letter-spacing:1.8px;margin-bottom:7px;font-weight:800;">{label}</div>
        <div style="color:#2d1f4e;font-size:16px;font-weight:800;">{value}</div>
        </div>"""

    # (kpi key, icon, display label, accent colour)
    kpi_config = [
        ("n_titles", "📚", "Book Titles", "#a48de8"),
        ("n_months", "📅", "Time Periods", "#7aa6f8"),
        ("total_units_sold", "📦", "Units Sold", "#6ee7c7"),
        ("total_revenue", "💰", "Revenue", "#3dcba8"),
    ]

    html = (
        '<div style="display:grid;grid-template-columns:repeat(auto-fit,minmax(140px,1fr));'
        'gap:12px;margin-bottom:24px;">'
    )
    for key, icon, label, colour in kpi_config:
        val = kpis.get(key)
        if val is None:
            continue
        # Thousands separators only for "large" numbers; small counts stay as-is.
        if isinstance(val, (int, float)) and val > 100:
            val = f"{val:,.0f}"
        html += card(icon, label, str(val), colour)
    # Extra KPIs not in config
    known = {k for k, *_ in kpi_config}
    for key, val in kpis.items():
        if key not in known:
            label = key.replace("_", " ").title()
            if isinstance(val, (int, float)) and val > 100:
                val = f"{val:,.0f}"
            html += card("📈", label, str(val), "#8fa8f8")
    html += "</div>"
    return html
473
+
474
+
475
+ # =========================================================
476
+ # INTERACTIVE PLOTLY CHARTS (BubbleBusters style)
477
+ # =========================================================
478
+
479
+ CHART_PALETTE = ["#7c5cbf", "#2ec4a0", "#e8537a", "#e8a230", "#5e8fef",
480
+ "#c45ea8", "#3dbacc", "#a0522d", "#6aaa3a", "#d46060"]
481
+
482
def _styled_layout(**kwargs) -> dict:
    """Return the shared Plotly layout kwargs; caller overrides win over defaults."""
    layout = dict(
        template="plotly_white",
        paper_bgcolor="rgba(255,255,255,0.95)",
        plot_bgcolor="rgba(255,255,255,0.98)",
        font=dict(family="system-ui, sans-serif", color="#2d1f4e", size=12),
        margin=dict(l=60, r=20, t=70, b=70),
        legend=dict(
            orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1,
            bgcolor="rgba(255,255,255,0.92)",
            bordercolor="rgba(124,92,191,0.35)", borderwidth=1,
        ),
        title=dict(font=dict(size=15, color="#4b2d8a")),
    )
    layout.update(kwargs)
    return layout
498
+
499
+
500
def _empty_chart(title: str) -> go.Figure:
    """Placeholder figure shown before the pipeline has produced data."""
    placeholder_note = dict(
        text="Run the pipeline to generate data",
        x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False,
        font=dict(size=14, color="rgba(124,92,191,0.5)"),
    )
    empty = go.Figure()
    empty.update_layout(
        title=title,
        height=420,
        template="plotly_white",
        paper_bgcolor="rgba(255,255,255,0.95)",
        annotations=[placeholder_note],
    )
    return empty
510
+
511
+
512
def build_sales_chart() -> go.Figure:
    """Interactive line chart of the monthly dashboard metrics.

    Reads ``artifacts/py/tables/df_dashboard.csv``, auto-detecting the date
    column (name containing "month"/"date") and plotting every numeric
    column against it.  Returns a placeholder figure when the CSV is
    missing or the columns cannot be detected.
    """
    path = PY_TAB_DIR / "df_dashboard.csv"
    if not path.exists():
        return _empty_chart("Sales Trends — run the pipeline first")
    df = pd.read_csv(path)
    date_col = next((c for c in df.columns if "month" in c.lower() or "date" in c.lower()), None)
    # Fix: the original compared dtype against only "float64"/"int64", which
    # silently dropped int32/float32/nullable numeric columns (e.g. int32 on
    # Windows) and made the chart fall back to the "could not detect" stub.
    # is_numeric_dtype covers all numeric dtypes; bools are excluded to keep
    # the original behaviour of plotting only true value columns.
    val_cols = [
        c for c in df.columns
        if c != date_col
        and pd.api.types.is_numeric_dtype(df[c])
        and not pd.api.types.is_bool_dtype(df[c])
    ]
    if not date_col or not val_cols:
        return _empty_chart("Could not auto-detect columns in df_dashboard.csv")
    df[date_col] = pd.to_datetime(df[date_col], errors="coerce")
    fig = go.Figure()
    for i, col in enumerate(val_cols):
        fig.add_trace(go.Scatter(
            x=df[date_col], y=df[col], name=col.replace("_", " ").title(),
            mode="lines+markers",
            line=dict(color=CHART_PALETTE[i % len(CHART_PALETTE)], width=2),
            marker=dict(size=4),
            hovertemplate=f"<b>{col.replace('_',' ').title()}</b><br>%{{x|%b %Y}}: %{{y:,.0f}}<extra></extra>",
        ))
    fig.update_layout(**_styled_layout(height=450, hovermode="x unified",
                                       title=dict(text="Monthly Overview")))
    fig.update_xaxes(gridcolor="rgba(124,92,191,0.15)", showgrid=True)
    fig.update_yaxes(gridcolor="rgba(124,92,191,0.15)", showgrid=True)
    return fig
535
+
536
+
537
def build_sentiment_chart() -> go.Figure:
    """Horizontal stacked bar chart of review sentiment per book title.

    Reads ``sentiment_counts_sampled.csv``; the first column is assumed to
    hold the book title and the negative/neutral/positive columns the
    counts.  Returns a placeholder figure when data is missing.
    """
    path = PY_TAB_DIR / "sentiment_counts_sampled.csv"
    if not path.exists():
        return _empty_chart("Sentiment Distribution — run the pipeline first")
    df = pd.read_csv(path)
    title_col = df.columns[0]  # assumes column 0 is the title — TODO confirm against the notebook
    sent_cols = [c for c in ["negative", "neutral", "positive"] if c in df.columns]
    if not sent_cols:
        return _empty_chart("No sentiment columns found in CSV")
    colors = {"negative": "#e8537a", "neutral": "#5e8fef", "positive": "#2ec4a0"}
    fig = go.Figure()
    for col in sent_cols:
        fig.add_trace(go.Bar(
            name=col.title(), y=df[title_col], x=df[col],
            orientation="h", marker_color=colors.get(col, "#888"),
            hovertemplate=f"<b>{col.title()}</b>: %{{x}}<extra></extra>",
        ))
    fig.update_layout(**_styled_layout(
        # Grow with row count so every title label stays readable.
        height=max(400, len(df) * 28), barmode="stack",
        title=dict(text="Sentiment Distribution by Book"),
    ))
    fig.update_xaxes(title="Number of Reviews")
    fig.update_yaxes(autorange="reversed")  # first CSV row at the top
    return fig
561
+
562
+
563
def build_top_sellers_chart() -> go.Figure:
    """Horizontal bar chart of the top 15 titles by units sold.

    Column names are auto-detected ("title" for labels, "unit"/"sold" for
    values) with positional fallbacks.  Returns a placeholder figure when
    the CSV is missing.
    """
    path = PY_TAB_DIR / "top_titles_by_units_sold.csv"
    if not path.exists():
        return _empty_chart("Top Sellers — run the pipeline first")
    df = pd.read_csv(path).head(15)
    title_col = next((c for c in df.columns if "title" in c.lower()), df.columns[0])
    val_col = next((c for c in df.columns if "unit" in c.lower() or "sold" in c.lower()), df.columns[-1])
    fig = go.Figure(go.Bar(
        y=df[title_col], x=df[val_col], orientation="h",
        # Colour each bar by its value on a light-to-dark purple scale.
        marker=dict(color=df[val_col], colorscale=[[0, "#c5b4f0"], [1, "#7c5cbf"]]),
        hovertemplate="<b>%{y}</b><br>Units: %{x:,.0f}<extra></extra>",
    ))
    fig.update_layout(**_styled_layout(
        height=max(400, len(df) * 30),
        title=dict(text="Top Selling Titles"), showlegend=False,
    ))
    fig.update_yaxes(autorange="reversed")  # best seller at the top
    fig.update_xaxes(title="Total Units Sold")
    return fig
582
+
583
+
584
def refresh_dashboard():
    """Recompute the KPI cards and the three interactive charts."""
    cards = render_kpi_cards()
    charts = (build_sales_chart(), build_sentiment_chart(), build_top_sellers_chart())
    return (cards,) + charts
586
+
587
+
588
+ # =========================================================
589
+ # UI
590
+ # =========================================================
591
+
592
+ ensure_dirs()
593
+
594
def load_css() -> str:
    """Read style.css next to the app; empty string when the file is absent."""
    css_path = BASE_DIR / "style.css"
    if not css_path.exists():
        return ""
    return css_path.read_text(encoding="utf-8")
597
+
598
+
599
+ with gr.Blocks(title="AIBDM 2026 Workshop App") as demo:
600
+
601
+ gr.Markdown(
602
+ "# SE21 App Template\n"
603
+ "*This is an app template for SE21 students*",
604
+ elem_id="escp_title",
605
+ )
606
+
607
+ # ===========================================================
608
+ # TAB 1 -- Pipeline Runner
609
+ # ===========================================================
610
+ with gr.Tab("Pipeline Runner"):
611
+ gr.Markdown()
612
+
613
+ with gr.Row():
614
+ with gr.Column(scale=1):
615
+ btn_nb1 = gr.Button("Step 1: Data Creation", variant="secondary")
616
+ with gr.Column(scale=1):
617
+ btn_nb2 = gr.Button("Step 2: Python Analysis", variant="secondary")
618
+
619
+ with gr.Row():
620
+ btn_all = gr.Button("Run Full Pipeline (Both Steps)", variant="primary")
621
+
622
+ run_log = gr.Textbox(
623
+ label="Execution Log",
624
+ lines=18,
625
+ max_lines=30,
626
+ interactive=False,
627
+ )
628
+
629
+ btn_nb1.click(run_datacreation, outputs=[run_log])
630
+ btn_nb2.click(run_pythonanalysis, outputs=[run_log])
631
+ btn_all.click(run_full_pipeline, outputs=[run_log])
632
+
633
+ # ===========================================================
634
+ # TAB 2 -- Dashboard (KPIs + Interactive Charts + Gallery)
635
+ # ===========================================================
636
+ with gr.Tab("Dashboard"):
637
+ kpi_html = gr.HTML(value=render_kpi_cards)
638
+
639
+ refresh_btn = gr.Button("Refresh Dashboard", variant="primary")
640
+
641
+ gr.Markdown("#### Interactive Charts")
642
+ chart_sales = gr.Plot(label="Monthly Overview")
643
+ chart_sentiment = gr.Plot(label="Sentiment Distribution")
644
+ chart_top = gr.Plot(label="Top Sellers")
645
+
646
+ gr.Markdown("#### Static Figures (from notebooks)")
647
+ gallery = gr.Gallery(
648
+ label="Generated Figures",
649
+ columns=2,
650
+ height=480,
651
+ object_fit="contain",
652
+ )
653
+
654
+ gr.Markdown("#### Data Tables")
655
+ table_dropdown = gr.Dropdown(
656
+ label="Select a table to view",
657
+ choices=[],
658
+ interactive=True,
659
+ )
660
+ table_display = gr.Dataframe(
661
+ label="Table Preview",
662
+ interactive=False,
663
+ )
664
+
665
+ def _on_refresh():
666
+ kpi, c1, c2, c3 = refresh_dashboard()
667
+ figs, dd, df = refresh_gallery()
668
+ return kpi, c1, c2, c3, figs, dd, df
669
+
670
+ refresh_btn.click(
671
+ _on_refresh,
672
+ outputs=[kpi_html, chart_sales, chart_sentiment, chart_top,
673
+ gallery, table_dropdown, table_display],
674
+ )
675
+ table_dropdown.change(
676
+ on_table_select,
677
+ inputs=[table_dropdown],
678
+ outputs=[table_display],
679
+ )
680
+
681
+ # ===========================================================
682
+ # TAB 3 -- AI Dashboard
683
+ # ===========================================================
684
+ with gr.Tab('"AI" Dashboard'):
685
+ gr.Markdown(
686
+ "### Ask questions, get interactive visualisations\n\n"
687
+ "Type a question and the system will pick the right interactive chart or table. "
688
+ "Currently uses keyword matching. "
689
+ "*Extra credit: integrate a real LLM by setting `HF_API_KEY` in Space secrets.*"
690
+ )
691
+
692
+ with gr.Row(equal_height=True):
693
+ with gr.Column(scale=1):
694
+ chatbot = gr.Chatbot(
695
+ label="Conversation",
696
+ height=380,
697
+ )
698
+ user_input = gr.Textbox(
699
+ label="Ask about your data",
700
+ placeholder="e.g. Show me sales trends / What are the top sellers? / Sentiment analysis",
701
+ lines=1,
702
+ )
703
+ gr.Examples(
704
+ examples=[
705
+ "Show me the sales trends",
706
+ "What does the sentiment look like?",
707
+ "Which titles sell the most?",
708
+ "Show the ARIMA forecasts",
709
+ "What are the pricing decisions?",
710
+ "Give me a dashboard overview",
711
+ ],
712
+ inputs=user_input,
713
+ )
714
+
715
+ with gr.Column(scale=1):
716
+ ai_figure = gr.Plot(
717
+ label="Interactive Chart",
718
+ )
719
+ ai_table = gr.Dataframe(
720
+ label="Data Table",
721
+ interactive=False,
722
+ )
723
+
724
+ user_input.submit(
725
+ ai_chat,
726
+ inputs=[user_input, chatbot],
727
+ outputs=[chatbot, user_input, ai_figure, ai_table],
728
+ )
729
+
730
+
731
+ demo.launch(css=load_css(), allowed_paths=[str(BASE_DIR)])
background_bottom.png ADDED
background_mid.png ADDED
background_top.png ADDED

Git LFS Details

  • SHA256: 27e963d20dbb7ae88368fb527d475c85ef0de3df63d8f0d7d5e2af7403a5b365
  • Pointer size: 131 Bytes
  • Size of remote file: 726 kB
datacreation.ipynb ADDED
@@ -0,0 +1,1087 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "4ba6aba8"
7
+ },
8
+ "source": [
9
+ "# 🤖 **Data Collection, Creation, Storage, and Processing**\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "jpASMyIQMaAq"
16
+ },
17
+ "source": [
18
+ "## **1.** 📦 Install required packages"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": 1,
24
+ "metadata": {
25
+ "colab": {
26
+ "base_uri": "https://localhost:8080/"
27
+ },
28
+ "id": "f48c8f8c",
29
+ "outputId": "13d0dd5e-82c6-489f-b1f0-e970186a4eb7"
30
+ },
31
+ "outputs": [
32
+ {
33
+ "output_type": "stream",
34
+ "name": "stdout",
35
+ "text": [
36
+ "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (4.13.5)\n",
37
+ "Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (2.2.2)\n",
38
+ "Requirement already satisfied: matplotlib in /usr/local/lib/python3.12/dist-packages (3.10.0)\n",
39
+ "Requirement already satisfied: seaborn in /usr/local/lib/python3.12/dist-packages (0.13.2)\n",
40
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (2.0.2)\n",
41
+ "Requirement already satisfied: textblob in /usr/local/lib/python3.12/dist-packages (0.19.0)\n",
42
+ "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (2.8.3)\n",
43
+ "Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (4.15.0)\n",
44
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas) (2.9.0.post0)\n",
45
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.2)\n",
46
+ "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.3)\n",
47
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.3.3)\n",
48
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (0.12.1)\n",
49
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (4.61.1)\n",
50
+ "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.4.9)\n",
51
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (26.0)\n",
52
+ "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (11.3.0)\n",
53
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (3.3.2)\n",
54
+ "Requirement already satisfied: nltk>=3.9 in /usr/local/lib/python3.12/dist-packages (from textblob) (3.9.1)\n",
55
+ "Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (8.3.1)\n",
56
+ "Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (1.5.3)\n",
57
+ "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (2025.11.3)\n",
58
+ "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (4.67.3)\n",
59
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)\n"
60
+ ]
61
+ }
62
+ ],
63
+ "source": [
64
+ "!pip install beautifulsoup4 pandas matplotlib seaborn numpy textblob"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "metadata": {
70
+ "id": "lquNYCbfL9IM"
71
+ },
72
+ "source": [
73
+ "## **2.** ⛏ Web-scrape all book titles, prices, and ratings from books.toscrape.com"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "markdown",
78
+ "metadata": {
79
+ "id": "0IWuNpxxYDJF"
80
+ },
81
+ "source": [
82
+ "### *a. Initial setup*\n",
83
+ "Define the base url of the website you will scrape as well as how and what you will scrape"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 2,
89
+ "metadata": {
90
+ "id": "91d52125"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import requests\n",
95
+ "from bs4 import BeautifulSoup\n",
96
+ "import pandas as pd\n",
97
+ "import time\n",
98
+ "\n",
99
+ "base_url = \"https://books.toscrape.com/catalogue/page-{}.html\"\n",
100
+ "headers = {\"User-Agent\": \"Mozilla/5.0\"}\n",
101
+ "\n",
102
+ "titles, prices, ratings = [], [], []"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "metadata": {
108
+ "id": "oCdTsin2Yfp3"
109
+ },
110
+ "source": [
111
+ "### *b. Fill titles, prices, and ratings from the web pages*"
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "execution_count": 3,
117
+ "metadata": {
118
+ "id": "xqO5Y3dnYhxt"
119
+ },
120
+ "outputs": [],
121
+ "source": [
122
+ "# Loop through all 50 pages\n",
123
+ "for page in range(1, 51):\n",
124
+ " url = base_url.format(page)\n",
125
+ " response = requests.get(url, headers=headers)\n",
126
+ " soup = BeautifulSoup(response.content, \"html.parser\")\n",
127
+ " books = soup.find_all(\"article\", class_=\"product_pod\")\n",
128
+ "\n",
129
+ " for book in books:\n",
130
+ " titles.append(book.h3.a[\"title\"])\n",
131
+ " prices.append(float(book.find(\"p\", class_=\"price_color\").text[1:]))\n",
132
+ " ratings.append(book.p.get(\"class\")[1])\n",
133
+ "\n",
134
+ " time.sleep(0.5) # polite scraping delay"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "markdown",
139
+ "metadata": {
140
+ "id": "T0TOeRC4Yrnn"
141
+ },
142
+ "source": [
143
+ "### *c. ✋🏻🛑⛔️ Create a dataframe df_books that contains the now complete \"title\", \"price\", and \"rating\" objects*"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "code",
148
+ "execution_count": 4,
149
+ "metadata": {
150
+ "id": "l5FkkNhUYTHh"
151
+ },
152
+ "outputs": [],
153
+ "source": [
154
+ "# 🗂️ Create DataFrame\n",
155
+ "df_books = pd.DataFrame({\n",
156
+ " \"title\": titles,\n",
157
+ " \"price\": prices,\n",
158
+ " \"rating\": ratings\n",
159
+ "})"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "markdown",
164
+ "metadata": {
165
+ "id": "duI5dv3CZYvF"
166
+ },
167
+ "source": [
168
+ "### *d. Save web-scraped dataframe either as a CSV or Excel file*"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": 5,
174
+ "metadata": {
175
+ "id": "lC1U_YHtZifh"
176
+ },
177
+ "outputs": [],
178
+ "source": [
179
+ "# 💾 Save to CSV\n",
180
+ "df_books.to_csv(\"books_data.csv\", index=False)\n",
181
+ "\n",
182
+ "# 💾 Or save to Excel\n",
183
+ "# df_books.to_excel(\"books_data.xlsx\", index=False)"
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "markdown",
188
+ "metadata": {
189
+ "id": "qMjRKMBQZlJi"
190
+ },
191
+ "source": [
192
+ "### *e. ✋🏻🛑⛔️ View first few lines*"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": 6,
198
+ "metadata": {
199
+ "colab": {
200
+ "base_uri": "https://localhost:8080/",
201
+ "height": 0
202
+ },
203
+ "id": "O_wIvTxYZqCK",
204
+ "outputId": "349b36b0-c008-4fd5-d4a4-dba38ae18337"
205
+ },
206
+ "outputs": [
207
+ {
208
+ "output_type": "execute_result",
209
+ "data": {
210
+ "text/plain": [
211
+ " title price rating\n",
212
+ "0 A Light in the Attic 51.77 Three\n",
213
+ "1 Tipping the Velvet 53.74 One\n",
214
+ "2 Soumission 50.10 One\n",
215
+ "3 Sharp Objects 47.82 Four\n",
216
+ "4 Sapiens: A Brief History of Humankind 54.23 Five"
217
+ ],
218
+ "text/html": [
219
+ "\n",
220
+ " <div id=\"df-04c87660-4415-45e9-ad3b-3fa19d9402c2\" class=\"colab-df-container\">\n",
221
+ " <div>\n",
222
+ "<style scoped>\n",
223
+ " .dataframe tbody tr th:only-of-type {\n",
224
+ " vertical-align: middle;\n",
225
+ " }\n",
226
+ "\n",
227
+ " .dataframe tbody tr th {\n",
228
+ " vertical-align: top;\n",
229
+ " }\n",
230
+ "\n",
231
+ " .dataframe thead th {\n",
232
+ " text-align: right;\n",
233
+ " }\n",
234
+ "</style>\n",
235
+ "<table border=\"1\" class=\"dataframe\">\n",
236
+ " <thead>\n",
237
+ " <tr style=\"text-align: right;\">\n",
238
+ " <th></th>\n",
239
+ " <th>title</th>\n",
240
+ " <th>price</th>\n",
241
+ " <th>rating</th>\n",
242
+ " </tr>\n",
243
+ " </thead>\n",
244
+ " <tbody>\n",
245
+ " <tr>\n",
246
+ " <th>0</th>\n",
247
+ " <td>A Light in the Attic</td>\n",
248
+ " <td>51.77</td>\n",
249
+ " <td>Three</td>\n",
250
+ " </tr>\n",
251
+ " <tr>\n",
252
+ " <th>1</th>\n",
253
+ " <td>Tipping the Velvet</td>\n",
254
+ " <td>53.74</td>\n",
255
+ " <td>One</td>\n",
256
+ " </tr>\n",
257
+ " <tr>\n",
258
+ " <th>2</th>\n",
259
+ " <td>Soumission</td>\n",
260
+ " <td>50.10</td>\n",
261
+ " <td>One</td>\n",
262
+ " </tr>\n",
263
+ " <tr>\n",
264
+ " <th>3</th>\n",
265
+ " <td>Sharp Objects</td>\n",
266
+ " <td>47.82</td>\n",
267
+ " <td>Four</td>\n",
268
+ " </tr>\n",
269
+ " <tr>\n",
270
+ " <th>4</th>\n",
271
+ " <td>Sapiens: A Brief History of Humankind</td>\n",
272
+ " <td>54.23</td>\n",
273
+ " <td>Five</td>\n",
274
+ " </tr>\n",
275
+ " </tbody>\n",
276
+ "</table>\n",
277
+ "</div>\n",
278
+ " <div class=\"colab-df-buttons\">\n",
279
+ "\n",
280
+ " <div class=\"colab-df-container\">\n",
281
+ " <button class=\"colab-df-convert\" onclick=\"convertToInteractive('df-04c87660-4415-45e9-ad3b-3fa19d9402c2')\"\n",
282
+ " title=\"Convert this dataframe to an interactive table.\"\n",
283
+ " style=\"display:none;\">\n",
284
+ "\n",
285
+ " <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\" viewBox=\"0 -960 960 960\">\n",
286
+ " <path d=\"M120-120v-720h720v720H120Zm60-500h600v-160H180v160Zm220 220h160v-160H400v160Zm0 220h160v-160H400v160ZM180-400h160v-160H180v160Zm440 0h160v-160H620v160ZM180-180h160v-160H180v160Zm440 0h160v-160H620v160Z\"/>\n",
287
+ " </svg>\n",
288
+ " </button>\n",
289
+ "\n",
290
+ " <style>\n",
291
+ " .colab-df-container {\n",
292
+ " display:flex;\n",
293
+ " gap: 12px;\n",
294
+ " }\n",
295
+ "\n",
296
+ " .colab-df-convert {\n",
297
+ " background-color: #E8F0FE;\n",
298
+ " border: none;\n",
299
+ " border-radius: 50%;\n",
300
+ " cursor: pointer;\n",
301
+ " display: none;\n",
302
+ " fill: #1967D2;\n",
303
+ " height: 32px;\n",
304
+ " padding: 0 0 0 0;\n",
305
+ " width: 32px;\n",
306
+ " }\n",
307
+ "\n",
308
+ " .colab-df-convert:hover {\n",
309
+ " background-color: #E2EBFA;\n",
310
+ " box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
311
+ " fill: #174EA6;\n",
312
+ " }\n",
313
+ "\n",
314
+ " .colab-df-buttons div {\n",
315
+ " margin-bottom: 4px;\n",
316
+ " }\n",
317
+ "\n",
318
+ " [theme=dark] .colab-df-convert {\n",
319
+ " background-color: #3B4455;\n",
320
+ " fill: #D2E3FC;\n",
321
+ " }\n",
322
+ "\n",
323
+ " [theme=dark] .colab-df-convert:hover {\n",
324
+ " background-color: #434B5C;\n",
325
+ " box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
326
+ " filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
327
+ " fill: #FFFFFF;\n",
328
+ " }\n",
329
+ " </style>\n",
330
+ "\n",
331
+ " <script>\n",
332
+ " const buttonEl =\n",
333
+ " document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2 button.colab-df-convert');\n",
334
+ " buttonEl.style.display =\n",
335
+ " google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
336
+ "\n",
337
+ " async function convertToInteractive(key) {\n",
338
+ " const element = document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2');\n",
339
+ " const dataTable =\n",
340
+ " await google.colab.kernel.invokeFunction('convertToInteractive',\n",
341
+ " [key], {});\n",
342
+ " if (!dataTable) return;\n",
343
+ "\n",
344
+ " const docLinkHtml = 'Like what you see? Visit the ' +\n",
345
+ " '<a target=\"_blank\" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'\n",
346
+ " + ' to learn more about interactive tables.';\n",
347
+ " element.innerHTML = '';\n",
348
+ " dataTable['output_type'] = 'display_data';\n",
349
+ " await google.colab.output.renderOutput(dataTable, element);\n",
350
+ " const docLink = document.createElement('div');\n",
351
+ " docLink.innerHTML = docLinkHtml;\n",
352
+ " element.appendChild(docLink);\n",
353
+ " }\n",
354
+ " </script>\n",
355
+ " </div>\n",
356
+ "\n",
357
+ "\n",
358
+ " </div>\n",
359
+ " </div>\n"
360
+ ],
361
+ "application/vnd.google.colaboratory.intrinsic+json": {
362
+ "type": "dataframe",
363
+ "variable_name": "df_books",
364
+ "summary": "{\n \"name\": \"df_books\",\n \"rows\": 1000,\n \"fields\": [\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 999,\n \"samples\": [\n \"The Grownup\",\n \"Persepolis: The Story of a Childhood (Persepolis #1-2)\",\n \"Ayumi's Violin\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 14.446689669952772,\n \"min\": 10.0,\n \"max\": 59.99,\n \"num_unique_values\": 903,\n \"samples\": [\n 19.73,\n 55.65,\n 46.31\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"rating\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"One\",\n \"Two\",\n \"Four\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
365
+ }
366
+ },
367
+ "metadata": {},
368
+ "execution_count": 6
369
+ }
370
+ ],
371
+ "source": [
372
+ "df_books.head()"
373
+ ]
374
+ },
375
+ {
376
+ "cell_type": "markdown",
377
+ "metadata": {
378
+ "id": "p-1Pr2szaqLk"
379
+ },
380
+ "source": [
381
+ "## **3.** 🧩 Create a meaningful connection between real & synthetic datasets"
382
+ ]
383
+ },
384
+ {
385
+ "cell_type": "markdown",
386
+ "metadata": {
387
+ "id": "SIaJUGIpaH4V"
388
+ },
389
+ "source": [
390
+ "### *a. Initial setup*"
391
+ ]
392
+ },
393
+ {
394
+ "cell_type": "code",
395
+ "execution_count": 7,
396
+ "metadata": {
397
+ "id": "-gPXGcRPuV_9"
398
+ },
399
+ "outputs": [],
400
+ "source": [
401
+ "import numpy as np\n",
402
+ "import random\n",
403
+ "from datetime import datetime\n",
404
+ "import warnings\n",
405
+ "\n",
406
+ "warnings.filterwarnings(\"ignore\")\n",
407
+ "random.seed(2025)\n",
408
+ "np.random.seed(2025)"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "markdown",
413
+ "metadata": {
414
+ "id": "pY4yCoIuaQqp"
415
+ },
416
+ "source": [
417
+ "### *b. Generate popularity scores based on rating (with some randomness) with a generate_popularity_score function*"
418
+ ]
419
+ },
420
+ {
421
+ "cell_type": "code",
422
+ "execution_count": 8,
423
+ "metadata": {
424
+ "id": "mnd5hdAbaNjz"
425
+ },
426
+ "outputs": [],
427
+ "source": [
428
+ "def generate_popularity_score(rating):\n",
429
+ " base = {\"One\": 2, \"Two\": 3, \"Three\": 3, \"Four\": 4, \"Five\": 4}.get(rating, 3)\n",
430
+ " trend_factor = random.choices([-1, 0, 1], weights=[1, 3, 2])[0]\n",
431
+ " return int(np.clip(base + trend_factor, 1, 5))"
432
+ ]
433
+ },
434
+ {
435
+ "cell_type": "markdown",
436
+ "metadata": {
437
+ "id": "n4-TaNTFgPak"
438
+ },
439
+ "source": [
440
+ "### *c. ✋🏻🛑⛔️ Run the function to create a \"popularity_score\" column from \"rating\"*"
441
+ ]
442
+ },
443
+ {
444
+ "cell_type": "code",
445
+ "execution_count": 9,
446
+ "metadata": {
447
+ "id": "V-G3OCUCgR07"
448
+ },
449
+ "outputs": [],
450
+ "source": [
451
+ "df_books[\"popularity_score\"] = df_books[\"rating\"].apply(generate_popularity_score)"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "markdown",
456
+ "metadata": {
457
+ "id": "HnngRNTgacYt"
458
+ },
459
+ "source": [
460
+ "### *d. Decide on the sentiment_label based on the popularity score with a get_sentiment function*"
461
+ ]
462
+ },
463
+ {
464
+ "cell_type": "code",
465
+ "execution_count": 10,
466
+ "metadata": {
467
+ "id": "kUtWmr8maZLZ"
468
+ },
469
+ "outputs": [],
470
+ "source": [
471
+ "def get_sentiment(popularity_score):\n",
472
+ " if popularity_score <= 2:\n",
473
+ " return \"negative\"\n",
474
+ " elif popularity_score == 3:\n",
475
+ " return \"neutral\"\n",
476
+ " else:\n",
477
+ " return \"positive\""
478
+ ]
479
+ },
480
+ {
481
+ "cell_type": "markdown",
482
+ "metadata": {
483
+ "id": "HF9F9HIzgT7Z"
484
+ },
485
+ "source": [
486
+ "### *e. ✋🏻🛑⛔️ Run the function to create a \"sentiment_label\" column from \"popularity_score\"*"
487
+ ]
488
+ },
489
+ {
490
+ "cell_type": "code",
491
+ "execution_count": 11,
492
+ "metadata": {
493
+ "id": "tafQj8_7gYCG"
494
+ },
495
+ "outputs": [],
496
+ "source": [
497
+ "df_books[\"sentiment_label\"] = df_books[\"popularity_score\"].apply(get_sentiment)"
498
+ ]
499
+ },
500
+ {
501
+ "cell_type": "markdown",
502
+ "metadata": {
503
+ "id": "T8AdKkmASq9a"
504
+ },
505
+ "source": [
506
+ "## **4.** 📈 Generate synthetic book sales data of 18 months"
507
+ ]
508
+ },
509
+ {
510
+ "cell_type": "markdown",
511
+ "metadata": {
512
+ "id": "OhXbdGD5fH0c"
513
+ },
514
+ "source": [
515
+ "### *a. Create a generate_sales_profit function that would generate sales patterns based on sentiment_label (with some randomness)*"
516
+ ]
517
+ },
518
+ {
519
+ "cell_type": "code",
520
+ "execution_count": 12,
521
+ "metadata": {
522
+ "id": "qkVhYPXGbgEn"
523
+ },
524
+ "outputs": [],
525
+ "source": [
526
+ "def generate_sales_profile(sentiment):\n",
527
+ " months = pd.date_range(end=datetime.today(), periods=18, freq=\"M\")\n",
528
+ "\n",
529
+ " if sentiment == \"positive\":\n",
530
+ " base = random.randint(200, 300)\n",
531
+ " trend = np.linspace(base, base + random.randint(20, 60), len(months))\n",
532
+ " elif sentiment == \"negative\":\n",
533
+ " base = random.randint(20, 80)\n",
534
+ " trend = np.linspace(base, base - random.randint(10, 30), len(months))\n",
535
+ " else: # neutral\n",
536
+ " base = random.randint(80, 160)\n",
537
+ " trend = np.full(len(months), base + random.randint(-10, 10))\n",
538
+ "\n",
539
+ " seasonality = 10 * np.sin(np.linspace(0, 3 * np.pi, len(months)))\n",
540
+ " noise = np.random.normal(0, 5, len(months))\n",
541
+ " monthly_sales = np.clip(trend + seasonality + noise, a_min=0, a_max=None).astype(int)\n",
542
+ "\n",
543
+ " return list(zip(months.strftime(\"%Y-%m\"), monthly_sales))"
544
+ ]
545
+ },
546
+ {
547
+ "cell_type": "markdown",
548
+ "metadata": {
549
+ "id": "L2ak1HlcgoTe"
550
+ },
551
+ "source": [
552
+ "### *b. Run the function as part of building sales_data*"
553
+ ]
554
+ },
555
+ {
556
+ "cell_type": "code",
557
+ "execution_count": 13,
558
+ "metadata": {
559
+ "id": "SlJ24AUafoDB"
560
+ },
561
+ "outputs": [],
562
+ "source": [
563
+ "sales_data = []\n",
564
+ "for _, row in df_books.iterrows():\n",
565
+ " records = generate_sales_profile(row[\"sentiment_label\"])\n",
566
+ " for month, units in records:\n",
567
+ " sales_data.append({\n",
568
+ " \"title\": row[\"title\"],\n",
569
+ " \"month\": month,\n",
570
+ " \"units_sold\": units,\n",
571
+ " \"sentiment_label\": row[\"sentiment_label\"]\n",
572
+ " })"
573
+ ]
574
+ },
575
+ {
576
+ "cell_type": "markdown",
577
+ "metadata": {
578
+ "id": "4IXZKcCSgxnq"
579
+ },
580
+ "source": [
581
+ "### *c. ✋🏻🛑⛔️ Create a df_sales DataFrame from sales_data*"
582
+ ]
583
+ },
584
+ {
585
+ "cell_type": "code",
586
+ "execution_count": 14,
587
+ "metadata": {
588
+ "id": "wcN6gtiZg-ws"
589
+ },
590
+ "outputs": [],
591
+ "source": [
592
+ "df_sales = pd.DataFrame(sales_data)"
593
+ ]
594
+ },
595
+ {
596
+ "cell_type": "markdown",
597
+ "metadata": {
598
+ "id": "EhIjz9WohAmZ"
599
+ },
600
+ "source": [
601
+ "### *d. Save df_sales as synthetic_sales_data.csv & view first few lines*"
602
+ ]
603
+ },
604
+ {
605
+ "cell_type": "code",
606
+ "execution_count": 15,
607
+ "metadata": {
608
+ "colab": {
609
+ "base_uri": "https://localhost:8080/"
610
+ },
611
+ "id": "MzbZvLcAhGaH",
612
+ "outputId": "c692bb04-7263-4115-a2ba-c72fe0180722"
613
+ },
614
+ "outputs": [
615
+ {
616
+ "output_type": "stream",
617
+ "name": "stdout",
618
+ "text": [
619
+ " title month units_sold sentiment_label\n",
620
+ "0 A Light in the Attic 2024-08 100 neutral\n",
621
+ "1 A Light in the Attic 2024-09 109 neutral\n",
622
+ "2 A Light in the Attic 2024-10 102 neutral\n",
623
+ "3 A Light in the Attic 2024-11 107 neutral\n",
624
+ "4 A Light in the Attic 2024-12 108 neutral\n"
625
+ ]
626
+ }
627
+ ],
628
+ "source": [
629
+ "df_sales.to_csv(\"synthetic_sales_data.csv\", index=False)\n",
630
+ "\n",
631
+ "print(df_sales.head())"
632
+ ]
633
+ },
634
+ {
635
+ "cell_type": "markdown",
636
+ "metadata": {
637
+ "id": "7g9gqBgQMtJn"
638
+ },
639
+ "source": [
640
+ "## **5.** 🎯 Generate synthetic customer reviews"
641
+ ]
642
+ },
643
+ {
644
+ "cell_type": "markdown",
645
+ "metadata": {
646
+ "id": "Gi4y9M9KuDWx"
647
+ },
648
+ "source": [
649
+ "### *a. ✋🏻🛑⛔️ Ask ChatGPT to create a list of 50 distinct generic book review texts for the sentiment labels \"positive\", \"neutral\", and \"negative\" called synthetic_reviews_by_sentiment*"
650
+ ]
651
+ },
652
+ {
653
+ "cell_type": "code",
654
+ "execution_count": 16,
655
+ "metadata": {
656
+ "id": "b3cd2a50"
657
+ },
658
+ "outputs": [],
659
+ "source": [
660
+ "synthetic_reviews_by_sentiment = {\n",
661
+ " \"positive\": [\n",
662
+ " \"A compelling and heartwarming read that stayed with me long after I finished.\",\n",
663
+ " \"Brilliantly written! The characters were unforgettable and the plot was engaging.\",\n",
664
+ " \"One of the best books I've read this year — inspiring and emotionally rich.\",\n",
665
+ " \"The author's storytelling was vivid and powerful. Highly recommended!\",\n",
666
+ " \"An absolute masterpiece. I couldn't put it down from start to finish.\",\n",
667
+ " \"Gripping, intelligent, and beautifully crafted — I loved every page.\",\n",
668
+ " \"The emotional depth and layered narrative were just perfect.\",\n",
669
+ " \"A thought-provoking journey with stunning character development.\",\n",
670
+ " \"Everything about this book just clicked. A top-tier read!\",\n",
671
+ " \"A flawless blend of emotion, intrigue, and style. Truly impressive.\",\n",
672
+ " \"Absolutely stunning work of fiction. Five stars from me.\",\n",
673
+ " \"Remarkably executed with breathtaking prose.\",\n",
674
+ " \"The pacing was perfect and I was hooked from page one.\",\n",
675
+ " \"Heartfelt and hopeful — a story well worth telling.\",\n",
676
+ " \"A vivid journey through complex emotions and stunning imagery.\",\n",
677
+ " \"This book had soul. Every word felt like it mattered.\",\n",
678
+ " \"It delivered more than I ever expected. Powerful and wise.\",\n",
679
+ " \"The characters leapt off the page and into my heart.\",\n",
680
+ " \"I could see every scene clearly in my mind — beautifully descriptive.\",\n",
681
+ " \"Refreshing, original, and impossible to forget.\",\n",
682
+ " \"A radiant celebration of resilience and love.\",\n",
683
+ " \"Powerful themes handled with grace and insight.\",\n",
684
+ " \"An unforgettable literary experience.\",\n",
685
+ " \"The best book club pick we’ve had all year.\",\n",
686
+ " \"A layered, lyrical narrative that resonates deeply.\",\n",
687
+ " \"Surprising, profound, and deeply humane.\",\n",
688
+ " \"One of those rare books I wish I could read again for the first time.\",\n",
689
+ " \"Both epic and intimate — a perfect balance.\",\n",
690
+ " \"It reads like a love letter to the human spirit.\",\n",
691
+ " \"Satisfying and uplifting with a memorable ending.\",\n",
692
+ " \"This novel deserves every bit of praise it gets.\",\n",
693
+ " \"Introspective, emotional, and elegantly composed.\",\n",
694
+ " \"A tour de force in contemporary fiction.\",\n",
695
+ " \"Left me smiling, teary-eyed, and completely fulfilled.\",\n",
696
+ " \"A novel with the rare ability to entertain and enlighten.\",\n",
697
+ " \"Incredibly moving. I highlighted so many lines.\",\n",
698
+ " \"A smart, sensitive take on relationships and identity.\",\n",
699
+ " \"You feel wiser by the end of it.\",\n",
700
+ " \"A gorgeously crafted tale about hope and second chances.\",\n",
701
+ " \"Poignant and real — a beautiful escape.\",\n",
702
+ " \"Brims with insight and authenticity.\",\n",
703
+ " \"Compelling characters and a satisfying plot.\",\n",
704
+ " \"An empowering and important read.\",\n",
705
+ " \"Elegantly crafted and deeply humane.\",\n",
706
+ " \"Taut storytelling that never lets go.\",\n",
707
+ " \"Each chapter offered a new treasure.\",\n",
708
+ " \"Lyrical writing that stays with you.\",\n",
709
+ " \"A wonderful blend of passion and thoughtfulness.\",\n",
710
+ " \"Uplifting, honest, and completely engrossing.\",\n",
711
+ " \"This one made me believe in storytelling again.\"\n",
712
+ " ],\n",
713
+ " \"neutral\": [\n",
714
+ " \"An average book — not great, but not bad either.\",\n",
715
+ " \"Some parts really stood out, others felt a bit flat.\",\n",
716
+ " \"It was okay overall. A decent way to pass the time.\",\n",
717
+ " \"The writing was fine, though I didn’t fully connect with the story.\",\n",
718
+ " \"Had a few memorable moments but lacked depth in some areas.\",\n",
719
+ " \"A mixed experience — neither fully engaging nor forgettable.\",\n",
720
+ " \"There was potential, but it didn't quite come together for me.\",\n",
721
+ " \"A reasonable effort that just didn’t leave a lasting impression.\",\n",
722
+ " \"Serviceable but not something I'd go out of my way to recommend.\",\n",
723
+ " \"Not much to dislike, but not much to rave about either.\",\n",
724
+ " \"It had its strengths, though they didn’t shine consistently.\",\n",
725
+ " \"I’m on the fence — parts were enjoyable, others not so much.\",\n",
726
+ " \"The book had a unique concept but lacked execution.\",\n",
727
+ " \"A middle-of-the-road read.\",\n",
728
+ " \"Engaging at times, but it lost momentum.\",\n",
729
+ " \"Would have benefited from stronger character development.\",\n",
730
+ " \"It passed the time, but I wouldn't reread it.\",\n",
731
+ " \"The plot had some holes that affected immersion.\",\n",
732
+ " \"Mediocre pacing made it hard to stay invested.\",\n",
733
+ " \"Satisfying in parts, underwhelming in others.\",\n",
734
+ " \"Neutral on this one — didn’t love it or hate it.\",\n",
735
+ " \"Fairly forgettable but with glimpses of promise.\",\n",
736
+ " \"The themes were solid, but not well explored.\",\n",
737
+ " \"Competent, just not compelling.\",\n",
738
+ " \"Had moments of clarity and moments of confusion.\",\n",
739
+ " \"I didn’t regret reading it, but I wouldn’t recommend it.\",\n",
740
+ " \"Readable, yet uninspired.\",\n",
741
+ " \"There was a spark, but it didn’t ignite.\",\n",
742
+ " \"A slow burn that didn’t quite catch fire.\",\n",
743
+ " \"I expected more nuance given the premise.\",\n",
744
+ " \"A safe, inoffensive choice.\",\n",
745
+ " \"Some parts lagged, others piqued my interest.\",\n",
746
+ " \"Decent, but needed polish.\",\n",
747
+ " \"Moderately engaging but didn’t stick the landing.\",\n",
748
+ " \"It simply lacked that emotional punch.\",\n",
749
+ " \"Just fine — no better, no worse.\",\n",
750
+ " \"Some thoughtful passages amid otherwise dry writing.\",\n",
751
+ " \"I appreciated the ideas more than the execution.\",\n",
752
+ " \"Struggled with cohesion.\",\n",
753
+ " \"Solidly average.\",\n",
754
+ " \"Good on paper, flat in practice.\",\n",
755
+ " \"A few bright spots, but mostly dim.\",\n",
756
+ " \"The kind of book that fades from memory.\",\n",
757
+ " \"It scratched the surface but didn’t dig deep.\",\n",
758
+ " \"Standard fare with some promise.\",\n",
759
+ " \"Okay, but not memorable.\",\n",
760
+ " \"Had potential that went unrealized.\",\n",
761
+ " \"Could have been tighter, sharper, deeper.\",\n",
762
+ " \"A blend of mediocrity and mild interest.\",\n",
763
+ " \"I kept reading, but barely.\"\n",
764
+ " ],\n",
765
+ " \"negative\": [\n",
766
+ " \"I struggled to get through this one — it just didn’t grab me.\",\n",
767
+ " \"The plot was confusing and the characters felt underdeveloped.\",\n",
768
+ " \"Disappointing. I had high hopes, but they weren't met.\",\n",
769
+ " \"Uninspired writing and a story that never quite took off.\",\n",
770
+ " \"Unfortunately, it was dull and predictable throughout.\",\n",
771
+ " \"The pacing dragged and I couldn’t find anything compelling.\",\n",
772
+ " \"This felt like a chore to read — lacked heart and originality.\",\n",
773
+ " \"Nothing really worked for me in this book.\",\n",
774
+ " \"A frustrating read that left me unsatisfied.\",\n",
775
+ " \"I kept hoping it would improve, but it never did.\",\n",
776
+ " \"The characters didn’t feel real, and the dialogue was forced.\",\n",
777
+ " \"I couldn't connect with the story at all.\",\n",
778
+ " \"A slow, meandering narrative with little payoff.\",\n",
779
+ " \"Tried too hard to be deep, but just felt empty.\",\n",
780
+ " \"The tone was uneven and confusing.\",\n",
781
+ " \"Way too repetitive and lacking progression.\",\n",
782
+ " \"The ending was abrupt and unsatisfying.\",\n",
783
+ " \"No emotional resonance — I felt nothing throughout.\",\n",
784
+ " \"I expected much more, but this fell flat.\",\n",
785
+ " \"Poorly edited and full of clichés.\",\n",
786
+ " \"The premise was interesting, but poorly executed.\",\n",
787
+ " \"Just didn’t live up to the praise.\",\n",
788
+ " \"A disjointed mess from start to finish.\",\n",
789
+ " \"Overly long and painfully dull.\",\n",
790
+ " \"Dialogue that felt robotic and unrealistic.\",\n",
791
+ " \"A hollow shell of what it could’ve been.\",\n",
792
+ " \"It lacked a coherent structure.\",\n",
793
+ " \"More confusing than complex.\",\n",
794
+ " \"Reading it felt like a task, not a treat.\",\n",
795
+ " \"There was no tension, no emotion — just words.\",\n",
796
+ " \"Characters with no motivation or development.\",\n",
797
+ " \"The plot twists were nonsensical.\",\n",
798
+ " \"Regret buying this book.\",\n",
799
+ " \"Nothing drew me in, nothing made me stay.\",\n",
800
+ " \"Too many subplots and none were satisfying.\",\n",
801
+ " \"Tedious and unimaginative.\",\n",
802
+ " \"Like reading a rough draft.\",\n",
803
+ " \"Disjointed, distant, and disappointing.\",\n",
804
+ " \"A lot of buildup with no payoff.\",\n",
805
+ " \"I don’t understand the hype.\",\n",
806
+ " \"This book simply didn’t work.\",\n",
807
+ " \"Forgettable in every sense.\",\n",
808
+ " \"More effort should’ve gone into editing.\",\n",
809
+ " \"The story lost its way early on.\",\n",
810
+ " \"It dragged endlessly.\",\n",
811
+ " \"I kept checking how many pages were left.\",\n",
812
+ " \"This lacked vision and clarity.\",\n",
813
+ " \"I expected substance — got fluff.\",\n",
814
+ " \"It failed to make me care.\"\n",
815
+ " ]\n",
816
+ "}"
817
+ ]
818
+ },
819
+ {
820
+ "cell_type": "markdown",
821
+ "metadata": {
822
+ "id": "fQhfVaDmuULT"
823
+ },
824
+ "source": [
825
+ "### *b. Generate 10 reviews per book using random sampling from the corresponding 50*"
826
+ ]
827
+ },
828
+ {
829
+ "cell_type": "code",
830
+ "execution_count": 17,
831
+ "metadata": {
832
+ "id": "l2SRc3PjuTGM"
833
+ },
834
+ "outputs": [],
835
+ "source": [
836
+ "review_rows = []\n",
837
+ "for _, row in df_books.iterrows():\n",
838
+ " title = row['title']\n",
839
+ " sentiment_label = row['sentiment_label']\n",
840
+ " review_pool = synthetic_reviews_by_sentiment[sentiment_label]\n",
841
+ " sampled_reviews = random.sample(review_pool, 10)\n",
842
+ " for review_text in sampled_reviews:\n",
843
+ " review_rows.append({\n",
844
+ " \"title\": title,\n",
845
+ " \"sentiment_label\": sentiment_label,\n",
846
+ " \"review_text\": review_text,\n",
847
+ " \"rating\": row['rating'],\n",
848
+ " \"popularity_score\": row['popularity_score']\n",
849
+ " })"
850
+ ]
851
+ },
852
+ {
853
+ "cell_type": "markdown",
854
+ "metadata": {
855
+ "id": "bmJMXF-Bukdm"
856
+ },
857
+ "source": [
858
+ "### *c. Create the final dataframe df_reviews & save it as synthetic_book_reviews.csv*"
859
+ ]
860
+ },
861
+ {
862
+ "cell_type": "code",
863
+ "execution_count": 18,
864
+ "metadata": {
865
+ "id": "ZUKUqZsuumsp"
866
+ },
867
+ "outputs": [],
868
+ "source": [
869
+ "df_reviews = pd.DataFrame(review_rows)\n",
870
+ "df_reviews.to_csv(\"synthetic_book_reviews.csv\", index=False)"
871
+ ]
872
+ },
873
+ {
874
+ "cell_type": "code",
875
+ "execution_count": 19,
876
+ "metadata": {
877
+ "colab": {
878
+ "base_uri": "https://localhost:8080/"
879
+ },
880
+ "id": "3946e521",
881
+ "outputId": "514d7bef-0488-4933-b03c-953b9e8a7f66"
882
+ },
883
+ "outputs": [
884
+ {
885
+ "output_type": "stream",
886
+ "name": "stdout",
887
+ "text": [
888
+ "✅ Wrote synthetic_title_level_features.csv\n",
889
+ "✅ Wrote synthetic_monthly_revenue_series.csv\n"
890
+ ]
891
+ }
892
+ ],
893
+ "source": [
894
+ "\n",
895
+ "# ============================================================\n",
896
+ "# ✅ Create \"R-ready\" derived inputs (root-level files)\n",
897
+ "# ============================================================\n",
898
+ "# These two files make the R notebook robust and fast:\n",
899
+ "# 1) synthetic_title_level_features.csv -> regression-ready, one row per title\n",
900
+ "# 2) synthetic_monthly_revenue_series.csv -> forecasting-ready, one row per month\n",
901
+ "\n",
902
+ "import numpy as np\n",
903
+ "\n",
904
+ "def _safe_num(s):\n",
905
+ " return pd.to_numeric(\n",
906
+ " pd.Series(s).astype(str).str.replace(r\"[^0-9.]\", \"\", regex=True),\n",
907
+ " errors=\"coerce\"\n",
908
+ " )\n",
909
+ "\n",
910
+ "# --- Clean book metadata (price/rating) ---\n",
911
+ "df_books_r = df_books.copy()\n",
912
+ "if \"price\" in df_books_r.columns:\n",
913
+ " df_books_r[\"price\"] = _safe_num(df_books_r[\"price\"])\n",
914
+ "if \"rating\" in df_books_r.columns:\n",
915
+ " df_books_r[\"rating\"] = _safe_num(df_books_r[\"rating\"])\n",
916
+ "\n",
917
+ "df_books_r[\"title\"] = df_books_r[\"title\"].astype(str).str.strip()\n",
918
+ "\n",
919
+ "# --- Clean sales ---\n",
920
+ "df_sales_r = df_sales.copy()\n",
921
+ "df_sales_r[\"title\"] = df_sales_r[\"title\"].astype(str).str.strip()\n",
922
+ "df_sales_r[\"month\"] = pd.to_datetime(df_sales_r[\"month\"], errors=\"coerce\")\n",
923
+ "df_sales_r[\"units_sold\"] = _safe_num(df_sales_r[\"units_sold\"])\n",
924
+ "\n",
925
+ "# --- Clean reviews ---\n",
926
+ "df_reviews_r = df_reviews.copy()\n",
927
+ "df_reviews_r[\"title\"] = df_reviews_r[\"title\"].astype(str).str.strip()\n",
928
+ "df_reviews_r[\"sentiment_label\"] = df_reviews_r[\"sentiment_label\"].astype(str).str.lower().str.strip()\n",
929
+ "if \"rating\" in df_reviews_r.columns:\n",
930
+ " df_reviews_r[\"rating\"] = _safe_num(df_reviews_r[\"rating\"])\n",
931
+ "if \"popularity_score\" in df_reviews_r.columns:\n",
932
+ " df_reviews_r[\"popularity_score\"] = _safe_num(df_reviews_r[\"popularity_score\"])\n",
933
+ "\n",
934
+ "# --- Sentiment shares per title (from reviews) ---\n",
935
+ "sent_counts = (\n",
936
+ " df_reviews_r.groupby([\"title\", \"sentiment_label\"])\n",
937
+ " .size()\n",
938
+ " .unstack(fill_value=0)\n",
939
+ ")\n",
940
+ "for lab in [\"positive\", \"neutral\", \"negative\"]:\n",
941
+ " if lab not in sent_counts.columns:\n",
942
+ " sent_counts[lab] = 0\n",
943
+ "\n",
944
+ "sent_counts[\"total_reviews\"] = sent_counts[[\"positive\", \"neutral\", \"negative\"]].sum(axis=1)\n",
945
+ "den = sent_counts[\"total_reviews\"].replace(0, np.nan)\n",
946
+ "sent_counts[\"share_positive\"] = sent_counts[\"positive\"] / den\n",
947
+ "sent_counts[\"share_neutral\"] = sent_counts[\"neutral\"] / den\n",
948
+ "sent_counts[\"share_negative\"] = sent_counts[\"negative\"] / den\n",
949
+ "sent_counts = sent_counts.reset_index()\n",
950
+ "\n",
951
+ "# --- Sales aggregation per title ---\n",
952
+ "sales_by_title = (\n",
953
+ " df_sales_r.dropna(subset=[\"title\"])\n",
954
+ " .groupby(\"title\", as_index=False)\n",
955
+ " .agg(\n",
956
+ " months_observed=(\"month\", \"nunique\"),\n",
957
+ " avg_units_sold=(\"units_sold\", \"mean\"),\n",
958
+ " total_units_sold=(\"units_sold\", \"sum\"),\n",
959
+ " )\n",
960
+ ")\n",
961
+ "\n",
962
+ "# --- Title-level features (join sales + books + sentiment) ---\n",
963
+ "df_title = (\n",
964
+ " sales_by_title\n",
965
+ " .merge(df_books_r[[\"title\", \"price\", \"rating\"]], on=\"title\", how=\"left\")\n",
966
+ " .merge(sent_counts[[\"title\", \"share_positive\", \"share_neutral\", \"share_negative\", \"total_reviews\"]],\n",
967
+ " on=\"title\", how=\"left\")\n",
968
+ ")\n",
969
+ "\n",
970
+ "df_title[\"avg_revenue\"] = df_title[\"avg_units_sold\"] * df_title[\"price\"]\n",
971
+ "df_title[\"total_revenue\"] = df_title[\"total_units_sold\"] * df_title[\"price\"]\n",
972
+ "\n",
973
+ "df_title.to_csv(\"synthetic_title_level_features.csv\", index=False)\n",
974
+ "print(\"✅ Wrote synthetic_title_level_features.csv\")\n",
975
+ "\n",
976
+ "# --- Monthly revenue series (proxy: units_sold * price) ---\n",
977
+ "monthly_rev = (\n",
978
+ " df_sales_r.merge(df_books_r[[\"title\", \"price\"]], on=\"title\", how=\"left\")\n",
979
+ ")\n",
980
+ "monthly_rev[\"revenue\"] = monthly_rev[\"units_sold\"] * monthly_rev[\"price\"]\n",
981
+ "\n",
982
+ "df_monthly = (\n",
983
+ " monthly_rev.dropna(subset=[\"month\"])\n",
984
+ " .groupby(\"month\", as_index=False)[\"revenue\"]\n",
985
+ " .sum()\n",
986
+ " .rename(columns={\"revenue\": \"total_revenue\"})\n",
987
+ " .sort_values(\"month\")\n",
988
+ ")\n",
989
+ "# if revenue is all NA (e.g., missing price), fallback to units_sold as a teaching proxy\n",
990
+ "if df_monthly[\"total_revenue\"].notna().sum() == 0:\n",
991
+ " df_monthly = (\n",
992
+ " df_sales_r.dropna(subset=[\"month\"])\n",
993
+ " .groupby(\"month\", as_index=False)[\"units_sold\"]\n",
994
+ " .sum()\n",
995
+ " .rename(columns={\"units_sold\": \"total_revenue\"})\n",
996
+ " .sort_values(\"month\")\n",
997
+ " )\n",
998
+ "\n",
999
+ "df_monthly[\"month\"] = pd.to_datetime(df_monthly[\"month\"], errors=\"coerce\").dt.strftime(\"%Y-%m-%d\")\n",
1000
+ "df_monthly.to_csv(\"synthetic_monthly_revenue_series.csv\", index=False)\n",
1001
+ "print(\"✅ Wrote synthetic_monthly_revenue_series.csv\")\n"
1002
+ ]
1003
+ },
1004
+ {
1005
+ "cell_type": "markdown",
1006
+ "metadata": {
1007
+ "id": "RYvGyVfXuo54"
1008
+ },
1009
+ "source": [
1010
+ "### *d. ✋🏻🛑⛔️ View the first few lines*"
1011
+ ]
1012
+ },
1013
+ {
1014
+ "cell_type": "code",
1015
+ "execution_count": 20,
1016
+ "metadata": {
1017
+ "colab": {
1018
+ "base_uri": "https://localhost:8080/"
1019
+ },
1020
+ "id": "xfE8NMqOurKo",
1021
+ "outputId": "191730ba-d5e2-4df7-97d2-99feb0b704af"
1022
+ },
1023
+ "outputs": [
1024
+ {
1025
+ "output_type": "stream",
1026
+ "name": "stdout",
1027
+ "text": [
1028
+ " title sentiment_label \\\n",
1029
+ "0 A Light in the Attic neutral \n",
1030
+ "1 A Light in the Attic neutral \n",
1031
+ "2 A Light in the Attic neutral \n",
1032
+ "3 A Light in the Attic neutral \n",
1033
+ "4 A Light in the Attic neutral \n",
1034
+ "\n",
1035
+ " review_text rating popularity_score \n",
1036
+ "0 Had potential that went unrealized. Three 3 \n",
1037
+ "1 The themes were solid, but not well explored. Three 3 \n",
1038
+ "2 It simply lacked that emotional punch. Three 3 \n",
1039
+ "3 Serviceable but not something I'd go out of my... Three 3 \n",
1040
+ "4 Standard fare with some promise. Three 3 \n"
1041
+ ]
1042
+ }
1043
+ ],
1044
+ "source": [
1045
+ "print(df_reviews.head())"
1046
+ ]
1047
+ }
1048
+ ],
1049
+ "metadata": {
1050
+ "colab": {
1051
+ "collapsed_sections": [
1052
+ "jpASMyIQMaAq",
1053
+ "lquNYCbfL9IM",
1054
+ "0IWuNpxxYDJF",
1055
+ "oCdTsin2Yfp3",
1056
+ "T0TOeRC4Yrnn",
1057
+ "duI5dv3CZYvF",
1058
+ "qMjRKMBQZlJi",
1059
+ "p-1Pr2szaqLk",
1060
+ "SIaJUGIpaH4V",
1061
+ "pY4yCoIuaQqp",
1062
+ "n4-TaNTFgPak",
1063
+ "HnngRNTgacYt",
1064
+ "HF9F9HIzgT7Z",
1065
+ "T8AdKkmASq9a",
1066
+ "OhXbdGD5fH0c",
1067
+ "L2ak1HlcgoTe",
1068
+ "4IXZKcCSgxnq",
1069
+ "EhIjz9WohAmZ",
1070
+ "Gi4y9M9KuDWx",
1071
+ "fQhfVaDmuULT",
1072
+ "bmJMXF-Bukdm",
1073
+ "RYvGyVfXuo54"
1074
+ ],
1075
+ "provenance": []
1076
+ },
1077
+ "kernelspec": {
1078
+ "display_name": "Python 3",
1079
+ "name": "python3"
1080
+ },
1081
+ "language_info": {
1082
+ "name": "python"
1083
+ }
1084
+ },
1085
+ "nbformat": 4,
1086
+ "nbformat_minor": 0
1087
+ }
pythonanalysis.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio==6.0.0
2
+ pandas>=2.0.0
3
+ numpy>=1.24.0
4
+ matplotlib>=3.7.0
5
+ seaborn>=0.13.0
6
+ statsmodels>=0.14.0
7
+ scikit-learn>=1.3.0
8
+ papermill>=2.5.0
9
+ nbformat>=5.9.0
10
+ pillow>=10.0.0
11
+ requests>=2.31.0
12
+ beautifulsoup4>=4.12.0
13
+ vaderSentiment>=3.3.2
14
+ huggingface_hub>=0.20.0
15
+ textblob>=0.18.0
16
+ faker>=20.0.0
17
+ plotly>=5.18.0
style.css ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* --- Target the Gradio app wrapper for backgrounds --- */
2
+ gradio-app,
3
+ .gradio-app,
4
+ .main,
5
+ #app,
6
+ [data-testid="app"] {
7
+ background-color: rgb(40,9,109) !important;
8
+ background-image:
9
+ url('https://huggingface.co/spaces/atascioglu/SE21AppTemplate/resolve/main/background_top.png'),
10
+ url('https://huggingface.co/spaces/atascioglu/SE21AppTemplate/resolve/main/background_mid.png'),
11
+ url('https://huggingface.co/spaces/atascioglu/SE21AppTemplate/resolve/main/background_bottom.png') !important;
12
+ background-position:
13
+ top center,
14
+ 0 913px,
15
+ bottom center !important;
16
+ background-repeat:
17
+ no-repeat,
18
+ repeat-y,
19
+ no-repeat !important;
20
+ background-size:
21
+ 100% auto,
22
+ 100% auto,
23
+ 100% auto !important;
24
+ min-height: 100vh !important;
25
+ }
26
+
27
+ /* --- Fallback on html/body --- */
28
+ html, body {
29
+ background-color: rgb(40,9,109) !important;
30
+ margin: 0 !important;
31
+ padding: 0 !important;
32
+ min-height: 100vh !important;
33
+ }
34
+
35
+ /* Bottom image is now part of the main background layers (positioned at bottom center) */
36
+
37
+ /* --- Main container --- */
38
+ .gradio-container {
39
+ max-width: 1400px !important;
40
+ width: 94vw !important;
41
+ margin: 0 auto !important;
42
+ padding-top: 220px !important;
43
+ padding-bottom: 150px !important;
44
+ background: transparent !important;
45
+ }
46
+
47
+ /* --- Title in ESCP gold --- */
48
+ #escp_title h1 {
49
+ color: rgb(242,198,55) !important;
50
+ font-size: 3rem !important;
51
+ font-weight: 800 !important;
52
+ text-align: center !important;
53
+ margin: 0 0 12px 0 !important;
54
+ }
55
+
56
+ /* --- Subtitle --- */
57
+ #escp_title p, #escp_title em {
58
+ color: rgba(255,255,255,0.85) !important;
59
+ text-align: center !important;
60
+ }
61
+
62
+ /* --- Tab bar background --- */
63
+ .tabs > .tab-nav,
64
+ .tab-nav,
65
+ div[role="tablist"],
66
+ .svelte-tabs > .tab-nav {
67
+ background: rgba(40,9,109,0.6) !important;
68
+ border-radius: 10px 10px 0 0 !important;
69
+ padding: 4px !important;
70
+ }
71
+
72
+ /* --- ALL tab buttons: force white text --- */
73
+ .tabs > .tab-nav button,
74
+ .tab-nav button,
75
+ div[role="tablist"] button,
76
+ button[role="tab"],
77
+ .svelte-tabs button,
78
+ .tab-nav > button,
79
+ .tabs button {
80
+ color: #ffffff !important;
81
+ font-weight: 600 !important;
82
+ border: none !important;
83
+ background: transparent !important;
84
+ padding: 10px 20px !important;
85
+ border-radius: 8px 8px 0 0 !important;
86
+ opacity: 1 !important;
87
+ }
88
+
89
+ /* --- Selected tab: ESCP gold --- */
90
+ .tabs > .tab-nav button.selected,
91
+ .tab-nav button.selected,
92
+ button[role="tab"][aria-selected="true"],
93
+ button[role="tab"].selected,
94
+ div[role="tablist"] button[aria-selected="true"],
95
+ .svelte-tabs button.selected {
96
+ color: rgb(242,198,55) !important;
97
+ background: rgba(255,255,255,0.12) !important;
98
+ }
99
+
100
+ /* --- Unselected tabs: ensure visibility --- */
101
+ .tabs > .tab-nav button:not(.selected),
102
+ .tab-nav button:not(.selected),
103
+ button[role="tab"][aria-selected="false"],
104
+ button[role="tab"]:not(.selected),
105
+ div[role="tablist"] button:not([aria-selected="true"]) {
106
+ color: #ffffff !important;
107
+ opacity: 1 !important;
108
+ }
109
+
110
+ /* --- White card panels --- */
111
+ .gradio-container .gr-block,
112
+ .gradio-container .gr-box,
113
+ .gradio-container .gr-panel,
114
+ .gradio-container .gr-group {
115
+ background: #ffffff !important;
116
+ border-radius: 10px !important;
117
+ }
118
+
119
+ /* --- Tab content area --- */
120
+ .tabitem {
121
+ background: rgba(255,255,255,0.95) !important;
122
+ border-radius: 0 0 10px 10px !important;
123
+ padding: 16px !important;
124
+ }
125
+
126
+ /* --- Inputs --- */
127
+ .gradio-container input,
128
+ .gradio-container textarea,
129
+ .gradio-container select {
130
+ background: #ffffff !important;
131
+ border: 1px solid #d1d5db !important;
132
+ border-radius: 8px !important;
133
+ }
134
+
135
+ /* --- Buttons: ESCP purple primary --- */
136
+ .gradio-container button:not([role="tab"]) {
137
+ font-weight: 600 !important;
138
+ padding: 10px 16px !important;
139
+ border-radius: 10px !important;
140
+ }
141
+
142
+ button.primary {
143
+ background-color: rgb(40,9,109) !important;
144
+ color: #ffffff !important;
145
+ border: none !important;
146
+ }
147
+
148
+ button.primary:hover {
149
+ background-color: rgb(60,20,140) !important;
150
+ }
151
+
152
+ button.secondary {
153
+ background-color: #ffffff !important;
154
+ color: rgb(40,9,109) !important;
155
+ border: 2px solid rgb(40,9,109) !important;
156
+ }
157
+
158
+ button.secondary:hover {
159
+ background-color: rgb(240,238,250) !important;
160
+ }
161
+
162
+ /* --- Dataframes --- */
163
+ [data-testid="dataframe"] {
164
+ background-color: #ffffff !important;
165
+ border-radius: 10px !important;
166
+ }
167
+
168
+ table {
169
+ font-size: 0.85rem !important;
170
+ }
171
+
172
+ /* --- Chatbot (AI Dashboard tab) --- */
173
+ .gr-chatbot {
174
+ min-height: 380px !important;
175
+ background-color: #ffffff !important;
176
+ border-radius: 12px !important;
177
+ }
178
+
179
+ .gr-chatbot .message.user {
180
+ background-color: rgb(232,225,250) !important;
181
+ border-radius: 12px !important;
182
+ }
183
+
184
+ .gr-chatbot .message.bot {
185
+ background-color: #f3f4f6 !important;
186
+ border-radius: 12px !important;
187
+ }
188
+
189
+ /* --- Gallery --- */
190
+ .gallery {
191
+ background: #ffffff !important;
192
+ border-radius: 10px !important;
193
+ }
194
+
195
+ /* --- Log textbox --- */
196
+ textarea {
197
+ font-family: monospace !important;
198
+ font-size: 0.8rem !important;
199
+ }
200
+
201
+ /* --- Markdown headings inside tabs --- */
202
+ .tabitem h3 {
203
+ color: rgb(40,9,109) !important;
204
+ font-weight: 700 !important;
205
+ }
206
+
207
+ .tabitem h4 {
208
+ color: #374151 !important;
209
+ }
210
+
211
+ /* --- Examples row (AI Dashboard) --- */
212
+ .examples-row button {
213
+ background: rgb(240,238,250) !important;
214
+ color: rgb(40,9,109) !important;
215
+ border: 1px solid rgb(40,9,109) !important;
216
+ border-radius: 8px !important;
217
+ font-size: 0.85rem !important;
218
+ }
219
+
220
+ .examples-row button:hover {
221
+ background: rgb(232,225,250) !important;
222
+ }
223
+
224
+ /* --- Header / footer: transparent over banner --- */
225
+ header, header *,
226
+ footer, footer * {
227
+ background: transparent !important;
228
+ box-shadow: none !important;
229
+ }
230
+
231
+ footer a, footer button,
232
+ header a, header button {
233
+ background: transparent !important;
234
+ border: none !important;
235
+ box-shadow: none !important;
236
+ }
237
+
238
+ #footer, #footer *,
239
+ [class*="footer"], [class*="footer"] *,
240
+ [class*="chip"], [class*="pill"], [class*="chip"] *, [class*="pill"] * {
241
+ background: transparent !important;
242
+ border: none !important;
243
+ box-shadow: none !important;
244
+ }
245
+
246
+ [data-testid*="api"], [data-testid*="settings"],
247
+ [id*="api"], [id*="settings"],
248
+ [class*="api"], [class*="settings"],
249
+ [class*="bottom"], [class*="toolbar"], [class*="controls"] {
250
+ background: transparent !important;
251
+ box-shadow: none !important;
252
+ }
253
+
254
+ [data-testid*="api"] *, [data-testid*="settings"] *,
255
+ [id*="api"] *, [id*="settings"] *,
256
+ [class*="api"] *, [class*="settings"] * {
257
+ background: transparent !important;
258
+ box-shadow: none !important;
259
+ }
260
+
261
+ section footer {
262
+ background: transparent !important;
263
+ }
264
+
265
+ section footer button,
266
+ section footer a {
267
+ background: transparent !important;
268
+ background-color: transparent !important;
269
+ border: none !important;
270
+ box-shadow: none !important;
271
+ color: white !important;
272
+ }
273
+
274
+ section footer button:hover,
275
+ section footer button:focus,
276
+ section footer a:hover,
277
+ section footer a:focus {
278
+ background: transparent !important;
279
+ background-color: transparent !important;
280
+ box-shadow: none !important;
281
+ }
282
+
283
+ section footer button,
284
+ section footer button * {
285
+ background: transparent !important;
286
+ background-color: transparent !important;
287
+ background-image: none !important;
288
+ box-shadow: none !important;
289
+ filter: none !important;
290
+ }
291
+
292
+ section footer button::before,
293
+ section footer button::after {
294
+ background: transparent !important;
295
+ background-color: transparent !important;
296
+ background-image: none !important;
297
+ box-shadow: none !important;
298
+ filter: none !important;
299
+ }
300
+
301
+ section footer a,
302
+ section footer a * {
303
+ background: transparent !important;
304
+ background-color: transparent !important;
305
+ box-shadow: none !important;
306
+ }
307
+
308
+ .gradio-container footer button,
309
+ .gradio-container footer button *,
310
+ .gradio-container .footer button,
311
+ .gradio-container .footer button * {
312
+ background: transparent !important;
313
+ background-color: transparent !important;
314
+ background-image: none !important;
315
+ box-shadow: none !important;
316
+ }
317
+
318
+ .gradio-container footer button::before,
319
+ .gradio-container footer button::after,
320
+ .gradio-container .footer button::before,
321
+ .gradio-container .footer button::after {
322
+ background: transparent !important;
323
+ background-color: transparent !important;
324
+ background-image: none !important;
325
+ box-shadow: none !important;
326
+ }