RimAlMoatassime committed on
Commit a16ca13 · verified · 1 Parent(s): 7a28183

Upload 6 files

2a_Python_Analysis_(1)_(1).ipynb ADDED
The diff for this file is too large to render. See raw diff
 
README.md CHANGED
@@ -1,11 +1,47 @@
 ---
-title: SE21 App Template
-emoji: 📊
-colorFrom: blue
-colorTo: purple
-sdk: docker
 pinned: false
-short_description: AI-enhanced analytics dashboard template for SE21 students
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 ---
+title: Interactive Notebook Runner
+emoji: 📓
+colorFrom: indigo
+colorTo: blue
+sdk: gradio
+sdk_version: 4.44.0
+app_file: app.py
 pinned: false
 ---

+# Interactive Notebook Runner
+
+This Space lets a user:
+- upload a Jupyter notebook
+- upload two CSV datasets
+- preview the uploaded datasets
+- run the notebook in the browser
+- download the executed notebook and a ZIP of outputs
+
+## Files to upload to the Space root
+- `app.py`
+- `requirements.txt`
+- `README.md`
+
+## Optional bundled defaults
+If you want the app to work even with no uploads, also keep:
+- `analysis_notebook.ipynb`
+- `synthetic_book_reviews.csv`
+- `synthetic_sales_data.csv`
+
+## Important notebook rule
+The app copies the two selected CSV files into the runtime folder under these fixed names:
+- `synthetic_book_reviews.csv`
+- `synthetic_sales_data.csv`
+
+The notebook should therefore read exactly those filenames during execution (see the read sketch after this diff).
+
+## Why the app is interactive
+The UI has:
+- file upload components
+- a preview button
+- a run button
+- output tables
+- downloadable files
+
+That means the user is not just reading a static page; they can actively choose files and trigger execution.
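Because the app always copies the chosen CSVs to those two fixed names inside the run folder, the uploaded notebook can load them with bare relative paths. A minimal sketch of the expected read pattern (assuming the notebook uses pandas):

```python
import pandas as pd

# The runner guarantees these exact filenames in the notebook's working
# directory, regardless of what the uploaded files were originally called.
reviews = pd.read_csv("synthetic_book_reviews.csv")
sales = pd.read_csv("synthetic_sales_data.csv")
```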
app.py CHANGED
@@ -1,758 +1,299 @@
-import os
 import re
 import json
-import time
-import traceback
 from pathlib import Path
-from typing import Dict, Any, List, Tuple

-import pandas as pd
 import gradio as gr
-import papermill as pm
-import plotly.graph_objects as go
-
-# Optional LLM (HuggingFace Inference API)
-try:
-    from huggingface_hub import InferenceClient
-except Exception:
-    InferenceClient = None
-
-# =========================================================
-# CONFIG
-# =========================================================
-
-BASE_DIR = Path(__file__).resolve().parent
-
-NB1 = os.environ.get("NB1", "datacreation.ipynb").strip()
-NB2 = os.environ.get("NB2", "pythonanalysis.ipynb").strip()
-
-RUNS_DIR = BASE_DIR / "runs"
-ART_DIR = BASE_DIR / "artifacts"
-PY_FIG_DIR = ART_DIR / "py" / "figures"
-PY_TAB_DIR = ART_DIR / "py" / "tables"
-
-PAPERMILL_TIMEOUT = int(os.environ.get("PAPERMILL_TIMEOUT", "1800"))
-MAX_PREVIEW_ROWS = int(os.environ.get("MAX_FILE_PREVIEW_ROWS", "50"))
-MAX_LOG_CHARS = int(os.environ.get("MAX_LOG_CHARS", "8000"))
-
-HF_API_KEY = os.environ.get("HF_API_KEY", "").strip()
-MODEL_NAME = os.environ.get("MODEL_NAME", "deepseek-ai/DeepSeek-R1").strip()
-HF_PROVIDER = os.environ.get("HF_PROVIDER", "novita").strip()
-N8N_WEBHOOK_URL = os.environ.get("N8N_WEBHOOK_URL", "").strip()
-
-LLM_ENABLED = bool(HF_API_KEY) and InferenceClient is not None
-llm_client = (
-    InferenceClient(provider=HF_PROVIDER, api_key=HF_API_KEY)
-    if LLM_ENABLED
-    else None
-)
-
-# =========================================================
-# HELPERS
-# =========================================================
-
-def ensure_dirs():
-    for p in [RUNS_DIR, ART_DIR, PY_FIG_DIR, PY_TAB_DIR]:
-        p.mkdir(parents=True, exist_ok=True)
-
-def stamp():
-    return time.strftime("%Y%m%d-%H%M%S")
-
-def tail(text: str, n: int = MAX_LOG_CHARS) -> str:
-    return (text or "")[-n:]
-
-def _ls(dir_path: Path, exts: Tuple[str, ...]) -> List[str]:
-    if not dir_path.is_dir():
-        return []
-    return sorted(p.name for p in dir_path.iterdir() if p.is_file() and p.suffix.lower() in exts)
-
-def _read_csv(path: Path) -> pd.DataFrame:
-    return pd.read_csv(path, nrows=MAX_PREVIEW_ROWS)
-
-def _read_json(path: Path):
-    with path.open(encoding="utf-8") as f:
-        return json.load(f)
-
-def artifacts_index() -> Dict[str, Any]:
-    return {
-        "python": {
-            "figures": _ls(PY_FIG_DIR, (".png", ".jpg", ".jpeg")),
-            "tables": _ls(PY_TAB_DIR, (".csv", ".json")),
-        },
-    }
-
-# =========================================================
-# PIPELINE RUNNERS
-# =========================================================
-
-def run_notebook(nb_name: str) -> str:
-    ensure_dirs()
-    nb_in = BASE_DIR / nb_name
-    if not nb_in.exists():
-        return f"ERROR: {nb_name} not found."
-    nb_out = RUNS_DIR / f"run_{stamp()}_{nb_name}"
-    pm.execute_notebook(
-        input_path=str(nb_in),
-        output_path=str(nb_out),
-        cwd=str(BASE_DIR),
-        log_output=True,
-        progress_bar=False,
-        request_save_on_cell_execute=True,
-        execution_timeout=PAPERMILL_TIMEOUT,
-    )
-    return f"Executed {nb_name}"
-
-
-def run_datacreation() -> str:
-    try:
-        log = run_notebook(NB1)
-        csvs = [f.name for f in BASE_DIR.glob("*.csv")]
-        return f"OK {log}\n\nCSVs now in /app:\n" + "\n".join(f" - {c}" for c in sorted(csvs))
-    except Exception as e:
-        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
-
-
-def run_pythonanalysis() -> str:
-    try:
-        log = run_notebook(NB2)
-        idx = artifacts_index()
-        figs = idx["python"]["figures"]
-        tabs = idx["python"]["tables"]
-        return (
-            f"OK {log}\n\n"
-            f"Figures: {', '.join(figs) or '(none)'}\n"
-            f"Tables: {', '.join(tabs) or '(none)'}"
-        )
-    except Exception as e:
-        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
-
-
-def run_full_pipeline() -> str:
-    logs = []
-    logs.append("=" * 50)
-    logs.append("STEP 1/2: Data Creation (web scraping + synthetic data)")
-    logs.append("=" * 50)
-    logs.append(run_datacreation())
-    logs.append("")
-    logs.append("=" * 50)
-    logs.append("STEP 2/2: Python Analysis (sentiment, ARIMA, dashboard)")
-    logs.append("=" * 50)
-    logs.append(run_pythonanalysis())
-    return "\n".join(logs)
-
-
-# =========================================================
-# GALLERY LOADERS
-# =========================================================
-
-def _load_all_figures() -> List[Tuple[str, str]]:
-    """Return list of (filepath, caption) for Gallery."""
-    items = []
-    for p in sorted(PY_FIG_DIR.glob("*.png")):
-        items.append((str(p), p.stem.replace('_', ' ').title()))
-    return items
-
-
-def _load_table_safe(path: Path) -> pd.DataFrame:
     try:
-        if path.suffix == ".json":
-            obj = _read_json(path)
-            if isinstance(obj, dict):
-                return pd.DataFrame([obj])
-            return pd.DataFrame(obj)
-        return _read_csv(path)
-    except Exception as e:
-        return pd.DataFrame([{"error": str(e)}])
-
-
-def refresh_gallery():
-    """Called when user clicks Refresh on Gallery tab."""
-    figures = _load_all_figures()
-    idx = artifacts_index()
-
-    table_choices = list(idx["python"]["tables"])
-
-    default_df = pd.DataFrame()
-    if table_choices:
-        default_df = _load_table_safe(PY_TAB_DIR / table_choices[0])
-
-    return (
-        figures if figures else [],
-        gr.update(choices=table_choices, value=table_choices[0] if table_choices else None),
-        default_df,
     )


-def on_table_select(choice: str):
-    if not choice:
-        return pd.DataFrame([{"hint": "Select a table above."}])
-    path = PY_TAB_DIR / choice
-    if not path.exists():
-        return pd.DataFrame([{"error": f"File not found: {choice}"}])
-    return _load_table_safe(path)


-# =========================================================
-# KPI LOADER
-# =========================================================
-
-def load_kpis() -> Dict[str, Any]:
-    for candidate in [PY_TAB_DIR / "kpis.json", PY_FIG_DIR / "kpis.json"]:
-        if candidate.exists():
-            try:
-                return _read_json(candidate)
-            except Exception:
-                pass
-    return {}
-
-
-# =========================================================
-# AI DASHBOARD -- LLM picks what to display
-# =========================================================
-
-DASHBOARD_SYSTEM = """You are an AI dashboard assistant for a book-sales analytics app.
-The user asks questions or requests about their data. You have access to pre-computed
-artifacts from a Python analysis pipeline.

-AVAILABLE ARTIFACTS (only reference ones that exist):
-{artifacts_json}

-KPI SUMMARY: {kpis_json}

-YOUR JOB:
-1. Answer the user's question conversationally using the KPIs and your knowledge of the artifacts.
-2. At the END of your response, output a JSON block (fenced with ```json ... ```) that tells
-   the dashboard which artifact to display. The JSON must have this shape:
-   {{"show": "figure"|"table"|"none", "scope": "python", "filename": "..."}}

-- Use "show": "figure" to display a chart image.
-- Use "show": "table" to display a CSV/JSON table.
-- Use "show": "none" if no artifact is relevant.

-RULES:
-- If the user asks about sales trends or forecasting by title, show sales_trends or arima figures.
-- If the user asks about sentiment, show sentiment figure or sentiment_counts table.
-- If the user asks about forecast accuracy or ARIMA, show arima figures.
-- If the user asks about top sellers, show top_titles_by_units_sold.csv.
-- If the user asks a general data question, pick the most relevant artifact.
-- Keep your answer concise (2-4 sentences), then the JSON block.
-"""
-
-JSON_BLOCK_RE = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
-FALLBACK_JSON_RE = re.compile(r"\{[^{}]*\"show\"[^{}]*\}", re.DOTALL)
-
-def _parse_display_directive(text: str) -> Dict[str, str]:
-    m = JSON_BLOCK_RE.search(text)
-    if m:
-        try:
-            return json.loads(m.group(1))
-        except json.JSONDecodeError:
-            pass
-    m = FALLBACK_JSON_RE.search(text)
-    if m:
-        try:
-            return json.loads(m.group(0))
-        except json.JSONDecodeError:
-            pass
-    return {"show": "none"}
-
-def _clean_response(text: str) -> str:
-    """Strip the JSON directive block from the displayed response."""
-    return JSON_BLOCK_RE.sub("", text).strip()
-
-def _n8n_call(msg: str) -> Tuple[str, Dict]:
-    """Call the student's n8n webhook and return (reply, directive)."""
-    import requests as req
     try:
-        resp = req.post(N8N_WEBHOOK_URL, json={"question": msg}, timeout=20)
-        data = resp.json()
-        answer = data.get("answer", "No response from n8n workflow.")
-        chart = data.get("chart", "none")
-        if chart and chart != "none":
-            return answer, {"show": "figure", "chart": chart}
-        return answer, {"show": "none"}
-    except Exception as e:
-        return f"n8n error: {e}. Falling back to keyword matching.", None
-
-
-def ai_chat(user_msg: str, history: list):
-    """Chat function for the AI Dashboard tab."""
-    if not user_msg or not user_msg.strip():
-        return history, "", None, None
-
-    idx = artifacts_index()
-    kpis = load_kpis()
-
-    # Priority: n8n webhook > HF LLM > keyword fallback
-    if N8N_WEBHOOK_URL:
-        reply, directive = _n8n_call(user_msg)
-        if directive is None:
-            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
-            reply += "\n\n" + reply_fb
-    elif not LLM_ENABLED:
-        reply, directive = _keyword_fallback(user_msg, idx, kpis)
-    else:
-        system = DASHBOARD_SYSTEM.format(
-            artifacts_json=json.dumps(idx, indent=2),
-            kpis_json=json.dumps(kpis, indent=2) if kpis else "(no KPIs yet, run the pipeline first)",
-        )
-        msgs = [{"role": "system", "content": system}]
-        for entry in (history or [])[-6:]:
-            msgs.append(entry)
-        msgs.append({"role": "user", "content": user_msg})
-
-        try:
-            r = llm_client.chat_completion(
-                model=MODEL_NAME,
-                messages=msgs,
-                temperature=0.3,
-                max_tokens=600,
-                stream=False,
-            )
-            raw = (
-                r["choices"][0]["message"]["content"]
-                if isinstance(r, dict)
-                else r.choices[0].message.content
-            )
-            directive = _parse_display_directive(raw)
-            reply = _clean_response(raw)
-        except Exception as e:
-            reply = f"LLM error: {e}. Falling back to keyword matching."
-            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
-            reply += "\n\n" + reply_fb
-
-    # Resolve artifacts — build interactive Plotly charts when possible
-    chart_out = None
-    tab_out = None
-    show = directive.get("show", "none")
-    fname = directive.get("filename", "")
-    chart_name = directive.get("chart", "")
-
-    # Interactive chart builders keyed by name
-    chart_builders = {
-        "sales": build_sales_chart,
-        "sentiment": build_sentiment_chart,
-        "top_sellers": build_top_sellers_chart,
-    }
-
-    if chart_name and chart_name in chart_builders:
-        chart_out = chart_builders[chart_name]()
-    elif show == "figure" and fname:
-        # Fallback: try to match filename to a chart builder
-        if "sales_trend" in fname:
-            chart_out = build_sales_chart()
-        elif "sentiment" in fname:
-            chart_out = build_sentiment_chart()
-        elif "arima" in fname or "forecast" in fname:
-            chart_out = build_sales_chart()  # closest interactive equivalent
-        else:
-            chart_out = _empty_chart(f"No interactive chart for {fname}")
-
-    if show == "table" and fname:
-        fp = PY_TAB_DIR / fname
-        if fp.exists():
-            tab_out = _load_table_safe(fp)
-        else:
-            reply += f"\n\n*(Could not find table: {fname})*"
-
-    new_history = (history or []) + [
-        {"role": "user", "content": user_msg},
-        {"role": "assistant", "content": reply},
-    ]
-
-    return new_history, "", chart_out, tab_out


-def _keyword_fallback(msg: str, idx: Dict, kpis: Dict) -> Tuple[str, Dict]:
-    """Simple keyword matcher when LLM is unavailable."""
-    msg_lower = msg.lower()

-    if not idx["python"]["figures"] and not idx["python"]["tables"]:
-        return (
-            "No artifacts found yet. Please run the pipeline first (Tab 1), "
-            "then come back here to explore the results.",
-            {"show": "none"},
         )

-    kpi_text = ""
-    if kpis:
-        total = kpis.get("total_units_sold", 0)
-        kpi_text = (
-            f"Quick summary: **{kpis.get('n_titles', '?')}** book titles across "
-            f"**{kpis.get('n_months', '?')}** months, with **{total:,.0f}** total units sold."
-        )

-    if any(w in msg_lower for w in ["trend", "sales trend", "monthly sale"]):
-        return (
-            f"Here are the sales trends. {kpi_text}",
-            {"show": "figure", "chart": "sales"},
-        )

-    if any(w in msg_lower for w in ["sentiment", "review", "positive", "negative"]):
-        return (
-            f"Here is the sentiment distribution across sampled book titles. {kpi_text}",
-            {"show": "figure", "chart": "sentiment"},
-        )

-    if any(w in msg_lower for w in ["arima", "forecast", "predict"]):
-        return (
-            f"Here are the sales trends and forecasts. {kpi_text}",
-            {"show": "figure", "chart": "sales"},
-        )

-    if any(w in msg_lower for w in ["top", "best sell", "popular", "rank"]):
         return (
-            f"Here are the top-selling titles by units sold. {kpi_text}",
-            {"show": "table", "scope": "python", "filename": "top_titles_by_units_sold.csv"},
         )

-    if any(w in msg_lower for w in ["price", "pricing", "decision"]):
         return (
-            f"Here are the pricing decisions. {kpi_text}",
-            {"show": "table", "scope": "python", "filename": "pricing_decisions.csv"},
         )
-
-    if any(w in msg_lower for w in ["dashboard", "overview", "summary", "kpi"]):
         return (
-            f"Dashboard overview: {kpi_text}\n\nAsk me about sales trends, sentiment, forecasts, "
-            "pricing, or top sellers to see specific visualizations.",
-            {"show": "table", "scope": "python", "filename": "df_dashboard.csv"},
         )

-    # Default
-    return (
-        f"I can show you various analyses. {kpi_text}\n\n"
-        "Try asking about: **sales trends**, **sentiment**, **ARIMA forecasts**, "
-        "**pricing decisions**, **top sellers**, or **dashboard overview**.",
-        {"show": "none"},
-    )
-
-
-# =========================================================
-# KPI CARDS (BubbleBusters style)
-# =========================================================

-def render_kpi_cards() -> str:
-    kpis = load_kpis()
-    if not kpis:
-        return (
-            '<div style="background:rgba(255,255,255,.65);backdrop-filter:blur(16px);'
-            'border-radius:20px;padding:28px;text-align:center;'
-            'border:1.5px solid rgba(255,255,255,.7);'
-            'box-shadow:0 8px 32px rgba(124,92,191,.08);">'
-            '<div style="font-size:36px;margin-bottom:10px;">📊</div>'
-            '<div style="color:#a48de8;font-size:14px;'
-            'font-weight:800;margin-bottom:6px;">No data yet</div>'
-            '<div style="color:#9d8fc4;font-size:12px;">'
-            'Run the pipeline to populate these cards.</div>'
-            '</div>'
-        )

-    def card(icon, label, value, colour):
-        return f"""
-        <div style="background:rgba(255,255,255,.72);backdrop-filter:blur(16px);
-        border-radius:20px;padding:18px 14px 16px;text-align:center;
-        border:1.5px solid rgba(255,255,255,.8);
-        box-shadow:0 4px 16px rgba(124,92,191,.08);
-        border-top:3px solid {colour};">
-        <div style="font-size:26px;margin-bottom:7px;line-height:1;">{icon}</div>
-        <div style="color:#9d8fc4;font-size:9.5px;text-transform:uppercase;
-        letter-spacing:1.8px;margin-bottom:7px;font-weight:800;">{label}</div>
-        <div style="color:#2d1f4e;font-size:16px;font-weight:800;">{value}</div>
-        </div>"""

-    kpi_config = [
-        ("n_titles", "📚", "Book Titles", "#a48de8"),
-        ("n_months", "📅", "Time Periods", "#7aa6f8"),
-        ("total_units_sold", "📦", "Units Sold", "#6ee7c7"),
-        ("total_revenue", "💰", "Revenue", "#3dcba8"),
-    ]

-    html = (
-        '<div style="display:grid;grid-template-columns:repeat(auto-fit,minmax(140px,1fr));'
-        'gap:12px;margin-bottom:24px;">'
     )
-    for key, icon, label, colour in kpi_config:
-        val = kpis.get(key)
-        if val is None:
-            continue
-        if isinstance(val, (int, float)) and val > 100:
-            val = f"{val:,.0f}"
-        html += card(icon, label, str(val), colour)
-    # Extra KPIs not in config
-    known = {k for k, *_ in kpi_config}
-    for key, val in kpis.items():
-        if key not in known:
-            label = key.replace("_", " ").title()
-            if isinstance(val, (int, float)) and val > 100:
-                val = f"{val:,.0f}"
-            html += card("📈", label, str(val), "#8fa8f8")
-    html += "</div>"
-    return html
-
-
-# =========================================================
-# INTERACTIVE PLOTLY CHARTS (BubbleBusters style)
-# =========================================================
-
-CHART_PALETTE = ["#7c5cbf", "#2ec4a0", "#e8537a", "#e8a230", "#5e8fef",
-                 "#c45ea8", "#3dbacc", "#a0522d", "#6aaa3a", "#d46060"]
-
-def _styled_layout(**kwargs) -> dict:
-    defaults = dict(
-        template="plotly_white",
-        paper_bgcolor="rgba(255,255,255,0.95)",
-        plot_bgcolor="rgba(255,255,255,0.98)",
-        font=dict(family="system-ui, sans-serif", color="#2d1f4e", size=12),
-        margin=dict(l=60, r=20, t=70, b=70),
-        legend=dict(
-            orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1,
-            bgcolor="rgba(255,255,255,0.92)",
-            bordercolor="rgba(124,92,191,0.35)", borderwidth=1,
-        ),
-        title=dict(font=dict(size=15, color="#4b2d8a")),
-    )
-    defaults.update(kwargs)
-    return defaults
-
-
-def _empty_chart(title: str) -> go.Figure:
-    fig = go.Figure()
-    fig.update_layout(
-        title=title, height=420, template="plotly_white",
-        paper_bgcolor="rgba(255,255,255,0.95)",
-        annotations=[dict(text="Run the pipeline to generate data",
-                          x=0.5, y=0.5, xref="paper", yref="paper", showarrow=False,
-                          font=dict(size=14, color="rgba(124,92,191,0.5)"))],
-    )
-    return fig
-
-
-def build_sales_chart() -> go.Figure:
-    path = PY_TAB_DIR / "df_dashboard.csv"
-    if not path.exists():
-        return _empty_chart("Sales Trends — run the pipeline first")
-    df = pd.read_csv(path)
-    date_col = next((c for c in df.columns if "month" in c.lower() or "date" in c.lower()), None)
-    val_cols = [c for c in df.columns if c != date_col and df[c].dtype in ("float64", "int64")]
-    if not date_col or not val_cols:
-        return _empty_chart("Could not auto-detect columns in df_dashboard.csv")
-    df[date_col] = pd.to_datetime(df[date_col], errors="coerce")
-    fig = go.Figure()
-    for i, col in enumerate(val_cols):
-        fig.add_trace(go.Scatter(
-            x=df[date_col], y=df[col], name=col.replace("_", " ").title(),
-            mode="lines+markers", line=dict(color=CHART_PALETTE[i % len(CHART_PALETTE)], width=2),
-            marker=dict(size=4),
-            hovertemplate=f"<b>{col.replace('_',' ').title()}</b><br>%{{x|%b %Y}}: %{{y:,.0f}}<extra></extra>",
-        ))
-    fig.update_layout(**_styled_layout(height=450, hovermode="x unified",
-                                       title=dict(text="Monthly Overview")))
-    fig.update_xaxes(gridcolor="rgba(124,92,191,0.15)", showgrid=True)
-    fig.update_yaxes(gridcolor="rgba(124,92,191,0.15)", showgrid=True)
-    return fig
-
-
-def build_sentiment_chart() -> go.Figure:
-    path = PY_TAB_DIR / "sentiment_counts_sampled.csv"
-    if not path.exists():
-        return _empty_chart("Sentiment Distribution — run the pipeline first")
-    df = pd.read_csv(path)
-    title_col = df.columns[0]
-    sent_cols = [c for c in ["negative", "neutral", "positive"] if c in df.columns]
-    if not sent_cols:
-        return _empty_chart("No sentiment columns found in CSV")
-    colors = {"negative": "#e8537a", "neutral": "#5e8fef", "positive": "#2ec4a0"}
-    fig = go.Figure()
-    for col in sent_cols:
-        fig.add_trace(go.Bar(
-            name=col.title(), y=df[title_col], x=df[col],
-            orientation="h", marker_color=colors.get(col, "#888"),
-            hovertemplate=f"<b>{col.title()}</b>: %{{x}}<extra></extra>",
-        ))
-    fig.update_layout(**_styled_layout(
-        height=max(400, len(df) * 28), barmode="stack",
-        title=dict(text="Sentiment Distribution by Book"),
-    ))
-    fig.update_xaxes(title="Number of Reviews")
-    fig.update_yaxes(autorange="reversed")
-    return fig
-
-
-def build_top_sellers_chart() -> go.Figure:
-    path = PY_TAB_DIR / "top_titles_by_units_sold.csv"
-    if not path.exists():
-        return _empty_chart("Top Sellers — run the pipeline first")
-    df = pd.read_csv(path).head(15)
-    title_col = next((c for c in df.columns if "title" in c.lower()), df.columns[0])
-    val_col = next((c for c in df.columns if "unit" in c.lower() or "sold" in c.lower()), df.columns[-1])
-    fig = go.Figure(go.Bar(
-        y=df[title_col], x=df[val_col], orientation="h",
-        marker=dict(color=df[val_col], colorscale=[[0, "#c5b4f0"], [1, "#7c5cbf"]]),
-        hovertemplate="<b>%{y}</b><br>Units: %{x:,.0f}<extra></extra>",
-    ))
-    fig.update_layout(**_styled_layout(
-        height=max(400, len(df) * 30),
-        title=dict(text="Top Selling Titles"), showlegend=False,
-    ))
-    fig.update_yaxes(autorange="reversed")
-    fig.update_xaxes(title="Total Units Sold")
-    return fig
-
-
-def refresh_dashboard():
-    return render_kpi_cards(), build_sales_chart(), build_sentiment_chart(), build_top_sellers_chart()
-
-
-# =========================================================
-# UI
-# =========================================================
-
-ensure_dirs()
-
-def load_css() -> str:
-    css_path = BASE_DIR / "style.css"
-    return css_path.read_text(encoding="utf-8") if css_path.exists() else ""
-
-
-with gr.Blocks(title="AIBDM 2026 Workshop App") as demo:

-    gr.Markdown(
-        "# SE21 App Template\n"
-        "*This is an app template for SE21 students*",
-        elem_id="escp_title",
-    )

-    # ===========================================================
-    # TAB 1 -- Pipeline Runner
-    # ===========================================================
-    with gr.Tab("Pipeline Runner"):
-        gr.Markdown()

-        with gr.Row():
-            with gr.Column(scale=1):
-                btn_nb1 = gr.Button("Step 1: Data Creation", variant="secondary")
-            with gr.Column(scale=1):
-                btn_nb2 = gr.Button("Step 2: Python Analysis", variant="secondary")

         with gr.Row():
-            btn_all = gr.Button("Run Full Pipeline (Both Steps)", variant="primary")
-
-        run_log = gr.Textbox(
-            label="Execution Log",
-            lines=18,
-            max_lines=30,
-            interactive=False,
-        )
-
-        btn_nb1.click(run_datacreation, outputs=[run_log])
-        btn_nb2.click(run_pythonanalysis, outputs=[run_log])
-        btn_all.click(run_full_pipeline, outputs=[run_log])
-
-    # ===========================================================
-    # TAB 2 -- Dashboard (KPIs + Interactive Charts + Gallery)
-    # ===========================================================
-    with gr.Tab("Dashboard"):
-        kpi_html = gr.HTML(value=render_kpi_cards)
-
-        refresh_btn = gr.Button("Refresh Dashboard", variant="primary")
-
-        gr.Markdown("#### Interactive Charts")
-        chart_sales = gr.Plot(label="Monthly Overview")
-        chart_sentiment = gr.Plot(label="Sentiment Distribution")
-        chart_top = gr.Plot(label="Top Sellers")
-
-        gr.Markdown("#### Static Figures (from notebooks)")
-        gallery = gr.Gallery(
-            label="Generated Figures",
-            columns=2,
-            height=480,
-            object_fit="contain",
-        )

-        gr.Markdown("#### Data Tables")
-        table_dropdown = gr.Dropdown(
-            label="Select a table to view",
-            choices=[],
-            interactive=True,
-        )
-        table_display = gr.Dataframe(
-            label="Table Preview",
-            interactive=False,
-        )
-
-        def _on_refresh():
-            kpi, c1, c2, c3 = refresh_dashboard()
-            figs, dd, df = refresh_gallery()
-            return kpi, c1, c2, c3, figs, dd, df
-
-        refresh_btn.click(
-            _on_refresh,
-            outputs=[kpi_html, chart_sales, chart_sentiment, chart_top,
-                     gallery, table_dropdown, table_display],
-        )
-        table_dropdown.change(
-            on_table_select,
-            inputs=[table_dropdown],
-            outputs=[table_display],
-        )
-
-    # ===========================================================
-    # TAB 3 -- AI Dashboard
-    # ===========================================================
-    with gr.Tab('"AI" Dashboard'):
-        _ai_status = (
-            "Connected to your **n8n workflow**." if N8N_WEBHOOK_URL
-            else "**LLM active.**" if LLM_ENABLED
-            else "Using **keyword matching**. Upgrade options: "
-            "set `N8N_WEBHOOK_URL` to connect your n8n workflow, "
-            "or set `HF_API_KEY` for direct LLM access."
-        )
-        gr.Markdown(
-            "### Ask questions, get interactive visualisations\n\n"
-            f"Type a question and the system will pick the right interactive chart or table. {_ai_status}"
-        )
-
-        with gr.Row(equal_height=True):
-            with gr.Column(scale=1):
-                chatbot = gr.Chatbot(
-                    label="Conversation",
-                    height=380,
-                )
-                user_input = gr.Textbox(
-                    label="Ask about your data",
-                    placeholder="e.g. Show me sales trends / What are the top sellers? / Sentiment analysis",
-                    lines=1,
-                )
-                gr.Examples(
-                    examples=[
-                        "Show me the sales trends",
-                        "What does the sentiment look like?",
-                        "Which titles sell the most?",
-                        "Show the ARIMA forecasts",
-                        "What are the pricing decisions?",
-                        "Give me a dashboard overview",
-                    ],
-                    inputs=user_input,
-                )
-
-            with gr.Column(scale=1):
-                ai_figure = gr.Plot(
-                    label="Interactive Chart",
-                )
-                ai_table = gr.Dataframe(
-                    label="Data Table",
-                    interactive=False,
-                )
-
-        user_input.submit(
-            ai_chat,
-            inputs=[user_input, chatbot],
-            outputs=[chatbot, user_input, ai_figure, ai_table],
-        )

-demo.launch(css=load_css(), allowed_paths=[str(BASE_DIR)])
 
+# AI-Assisted Code — Academic Integrity Notice
+# Generated with The App Builder. ESCP coursework.
+# Student must be able to explain all code when asked.
+
+"""Interactive Hugging Face Space to execute a Jupyter notebook on uploaded CSV files."""
+
+import io
 import re
 import json
+import shutil
+import zipfile
+import tempfile
 from pathlib import Path

 import gradio as gr
+import nbformat
+import pandas as pd
+from nbclient import NotebookClient
+from nbclient.exceptions import CellExecutionError
+
+
+APP_DIR = Path(__file__).resolve().parent
+DEFAULT_NOTEBOOK = APP_DIR / "analysis_notebook.ipynb"
+DEFAULT_CSVS = [
+    APP_DIR / "synthetic_book_reviews.csv",
+    APP_DIR / "synthetic_sales_data.csv",
+]
+EXPECTED_DATASET_NAMES = [
+    "synthetic_book_reviews.csv",
+    "synthetic_sales_data.csv",
+]
+
+
+def _display_name(file_obj):
+    """Return a readable filename from a Gradio upload object."""
+    if file_obj is None:
+        return ""
+    if isinstance(file_obj, str):
+        return Path(file_obj).name
+    name = getattr(file_obj, "name", "")
+    return Path(name).name if name else ""
+
+
+def _resolve_uploaded_path(file_obj):
+    """Convert Gradio file input into a local Path."""
+    if file_obj is None:
+        return None
+    if isinstance(file_obj, str):
+        return Path(file_obj)
+    file_name = getattr(file_obj, "name", None)
+    return Path(file_name) if file_name else None
+
+
+def _preview_csv(file_obj):
+    """Load a small preview for the UI."""
+    file_path = _resolve_uploaded_path(file_obj)
+    if file_path is None:
+        return pd.DataFrame({"Info": ["No file uploaded yet."]})
     try:
+        preview_df = pd.read_csv(file_path).head(10)
+        return preview_df
+    except Exception as error:
+        return pd.DataFrame({"Error": [f"Could not preview {file_path.name}: {error}"]})
+
+
+def preview_datasets(csv_one, csv_two):
+    """Return preview tables and a small status message."""
+    left = _preview_csv(csv_one)
+    right = _preview_csv(csv_two)
+    message = (
+        f"Preview ready. File 1: {_display_name(csv_one) or 'default / missing'} | "
+        f"File 2: {_display_name(csv_two) or 'default / missing'}"
     )
+    return left, right, message


+def _remove_runtime_install_cells(notebook_node):
+    """Remove shell install cells because Hugging Face installs from requirements.txt."""
+    cleaned_cells = []
+    removed_count = 0
+    install_pattern = re.compile(r"^\s*!pip\s+install|^\s*%pip\s+install", re.IGNORECASE)
+
+    for cell in notebook_node.cells:
+        if cell.get("cell_type") != "code":
+            cleaned_cells.append(cell)
+            continue
+        source = cell.get("source", "")
+        if install_pattern.search(source.strip()):
+            removed_count += 1
+            continue
+        cleaned_cells.append(cell)
+
+    notebook_node.cells = cleaned_cells
+    return removed_count


+def _prepare_run_directory(notebook_file, csv_one, csv_two):
+    """Create a clean temp folder and standardize filenames for the notebook."""
+    run_dir = Path(tempfile.mkdtemp(prefix="hf_notebook_run_"))
+
+    notebook_source = _resolve_uploaded_path(notebook_file) or DEFAULT_NOTEBOOK
+    if not notebook_source.exists():
+        raise FileNotFoundError("No notebook found. Upload one or include analysis_notebook.ipynb.")
+
+    notebook_target = run_dir / "analysis_notebook.ipynb"
+    shutil.copy2(notebook_source, notebook_target)
+
+    csv_sources = [
+        _resolve_uploaded_path(csv_one) or DEFAULT_CSVS[0],
+        _resolve_uploaded_path(csv_two) or DEFAULT_CSVS[1],
+    ]
+
+    for source, expected_name in zip(csv_sources, EXPECTED_DATASET_NAMES):
+        if not Path(source).exists():
+            raise FileNotFoundError(
+                f"Missing dataset: {expected_name}. Upload it or keep the bundled default file."
+            )
+        shutil.copy2(source, run_dir / expected_name)
+
+    return run_dir, notebook_target


+def _extract_notebook_outputs(executed_notebook):
+    """Collect text and tables from executed notebook cells."""
+    text_chunks = []
+    tables = []
+
+    for cell in executed_notebook.cells:
+        if cell.get("cell_type") != "code":
+            continue
+        for output in cell.get("outputs", []):
+            if output.get("output_type") == "stream":
+                text_chunks.append(output.get("text", ""))
+            elif output.get("output_type") in {"execute_result", "display_data"}:
+                data = output.get("data", {})
+                if "text/plain" in data:
+                    text_chunks.append(str(data["text/plain"]))
+                if "text/html" in data:
+                    try:
+                        tables.append(pd.read_html(io.StringIO(data["text/html"]))[0])
+                    except Exception:
+                        pass
+            elif output.get("output_type") == "error":
+                traceback_text = "\n".join(output.get("traceback", []))
+                text_chunks.append(traceback_text)
+
+    combined_text = "\n\n".join(chunk.strip() for chunk in text_chunks if str(chunk).strip())
+    if not combined_text:
+        combined_text = "Notebook executed, but no text output was captured."
+
+    return combined_text, tables
+
+
+def _save_output_bundle(run_dir):
+    """Zip everything created during execution for download."""
+    zip_path = run_dir / "execution_outputs.zip"
+    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_file:
+        for item in run_dir.rglob("*"):
+            if item == zip_path:
+                continue
+            if item.is_file():
+                zip_file.write(item, item.relative_to(run_dir))
+    return zip_path
+
+
+def run_analysis(notebook_file, csv_one, csv_two):
+    """Run the notebook on the selected files and return UI-friendly outputs."""
     try:
+        run_dir, notebook_path = _prepare_run_directory(notebook_file, csv_one, csv_two)
+
+        with notebook_path.open("r", encoding="utf-8") as notebook_handle:
+            notebook_node = nbformat.read(notebook_handle, as_version=4)
+
+        removed_cells = _remove_runtime_install_cells(notebook_node)
+
+        client = NotebookClient(
+            notebook_node,
+            timeout=900,
+            kernel_name="python3",
+            resources={"metadata": {"path": str(run_dir)}},
+            allow_errors=False,
         )
+        client.execute()
+
+        executed_notebook_path = run_dir / "executed_analysis_notebook.ipynb"
+        with executed_notebook_path.open("w", encoding="utf-8") as notebook_handle:
+            nbformat.write(notebook_node, notebook_handle)
+
+        log_text, tables = _extract_notebook_outputs(notebook_node)
+        output_zip = _save_output_bundle(run_dir)
+
+        first_table = tables[0] if tables else pd.DataFrame({"Info": ["No table output detected."]})
+        second_table = tables[1] if len(tables) > 1 else pd.DataFrame({"Info": ["No second table detected."]})
+
+        summary = {
+            "status": "success",
+            "removed_install_cells": removed_cells,
+            "notebook_used": _display_name(notebook_file) or DEFAULT_NOTEBOOK.name,
+            "dataset_1": _display_name(csv_one) or DEFAULT_CSVS[0].name,
+            "dataset_2": _display_name(csv_two) or DEFAULT_CSVS[1].name,
+            "run_directory": str(run_dir),
+        }

         return (
+            "Execution finished successfully.",
+            log_text[:15000],
+            first_table,
+            second_table,
+            str(executed_notebook_path),
+            str(output_zip),
+            json.dumps(summary, indent=2),
         )

+    except CellExecutionError as error:
         return (
+            "Notebook execution failed.",
+            str(error),
+            pd.DataFrame({"Error": ["Notebook cell execution failed. See log above."]}),
+            pd.DataFrame({"Error": ["No second table because execution stopped early."]}),
+            None,
+            None,
+            json.dumps({"status": "failed", "reason": "CellExecutionError"}, indent=2),
         )
+    except Exception as error:
         return (
+            "App error.",
+            str(error),
+            pd.DataFrame({"Error": [f"App failed before completion: {error}"]}),
+            pd.DataFrame({"Error": ["No second table available."]}),
+            None,
+            None,
+            json.dumps({"status": "failed", "reason": str(error)}, indent=2),
         )


+CUSTOM_CSS = """
+#run-btn {min-height: 52px; font-size: 18px;}
+.gradio-container {max-width: 1200px !important;}
+"""

+with gr.Blocks(css=CUSTOM_CSS, theme=gr.themes.Soft()) as demo:
+    gr.Markdown(
+        """
+        # Interactive Notebook Runner
+        Upload a Jupyter notebook and two CSV files, preview the datasets, then run the notebook directly in the Space.
+
+        **Default behavior:** if you leave uploads empty, the app uses the bundled class notebook and bundled CSV files.
+        """
     )

+    with gr.Row():
+        notebook_input = gr.File(label="Notebook (.ipynb)", file_types=[".ipynb"])
+        csv_one_input = gr.File(label="Dataset 1 (.csv)", file_types=[".csv"])
+        csv_two_input = gr.File(label="Dataset 2 (.csv)", file_types=[".csv"])

+    with gr.Row():
+        preview_button = gr.Button("Refresh previews")
+        run_button = gr.Button("Run notebook", elem_id="run-btn")

+    preview_status = gr.Textbox(label="Preview status", interactive=False)

+    with gr.Tab("Dataset previews"):
         with gr.Row():
+            preview_table_one = gr.Dataframe(label="Preview: dataset 1", interactive=False)
+            preview_table_two = gr.Dataframe(label="Preview: dataset 2", interactive=False)

+    with gr.Tab("Execution results"):
+        status_box = gr.Textbox(label="Run status", interactive=False)
+        execution_log = gr.Textbox(label="Execution log", lines=18, interactive=False)
+        with gr.Row():
+            output_table_one = gr.Dataframe(label="Detected output table 1", interactive=False)
+            output_table_two = gr.Dataframe(label="Detected output table 2", interactive=False)
+        run_metadata = gr.Code(label="Run metadata", language="json")
+
+    with gr.Tab("Downloads"):
+        executed_notebook_file = gr.File(label="Executed notebook")
+        execution_zip_file = gr.File(label="ZIP of all outputs")
+
+    preview_button.click(
+        fn=preview_datasets,
+        inputs=[csv_one_input, csv_two_input],
+        outputs=[preview_table_one, preview_table_two, preview_status],
+    )

+    run_button.click(
+        fn=run_analysis,
+        inputs=[notebook_input, csv_one_input, csv_two_input],
+        outputs=[
+            status_box,
+            execution_log,
+            output_table_one,
+            output_table_two,
+            executed_notebook_file,
+            execution_zip_file,
+            run_metadata,
+        ],
+    )

+demo.launch()
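For reference, the execution core of the new `app.py` reduces to a plain nbclient pattern. A minimal standalone sketch (assuming `analysis_notebook.ipynb` and a `python3` kernel are available in the current directory):

```python
import nbformat
from nbclient import NotebookClient

# Read the notebook, execute every cell with the current directory as the
# working directory, then save the executed copy.
nb = nbformat.read("analysis_notebook.ipynb", as_version=4)
client = NotebookClient(
    nb,
    timeout=900,
    kernel_name="python3",
    resources={"metadata": {"path": "."}},  # kernel working directory
)
client.execute()
nbformat.write(nb, "executed_analysis_notebook.ipynb")
```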
requirements.txt CHANGED
@@ -1,17 +1,9 @@
-gradio==6.0.0
-pandas>=2.0.0
-numpy>=1.24.0
-matplotlib>=3.7.0
-seaborn>=0.13.0
-statsmodels>=0.14.0
-scikit-learn>=1.3.0
-papermill>=2.5.0
-nbformat>=5.9.0
-pillow>=10.0.0
-requests>=2.31.0
-beautifulsoup4>=4.12.0
-vaderSentiment>=3.3.2
-huggingface_hub>=0.20.0
-textblob>=0.18.0
-faker>=20.0.0
-plotly>=5.18.0
+gradio==4.44.0
+pandas==2.2.2
+nbformat==5.10.4
+nbclient==0.10.0
+ipykernel==6.29.5
+lxml==5.3.0
+matplotlib==3.9.2
+numpy==2.1.1
+vaderSentiment==3.3.2
synthetic_book_reviews.csv ADDED
The diff for this file is too large to render. See raw diff
 
synthetic_sales_data.csv ADDED
The diff for this file is too large to render. See raw diff