Update app.py
Browse files
app.py
CHANGED
|
@@ -2,20 +2,26 @@ import gradio as gr
|
|
| 2 |
from datasets import load_dataset
|
| 3 |
from sentence_transformers import SentenceTransformer, util
|
| 4 |
import torch
|
| 5 |
-
from itertools import chain
|
| 6 |
|
| 7 |
# Load dataset
|
| 8 |
dataset = load_dataset("asuender/motivational-quotes", "quotes_extended", split="train")
|
|
|
|
|
|
|
| 9 |
quotes = [item["quote"] for item in dataset]
|
| 10 |
authors = [item["author"] for item in dataset]
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
all_tags = sorted(set(chain.from_iterable(tags_list)))
|
| 13 |
|
| 14 |
-
# Load model
|
| 15 |
model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 16 |
quote_embeddings = model.encode(quotes, convert_to_tensor=True)
|
| 17 |
|
| 18 |
-
#
|
| 19 |
def recommend_quote(mood_input, selected_tag):
|
| 20 |
filtered = [(q, a, i) for i, (q, a, t) in enumerate(zip(quotes, authors, tags_list)) if selected_tag in t]
|
| 21 |
if not filtered:
|
|
@@ -36,7 +42,7 @@ def recommend_quote(mood_input, selected_tag):
|
|
| 36 |
|
| 37 |
return result.strip()
|
| 38 |
|
| 39 |
-
# Gradio
|
| 40 |
iface = gr.Interface(
|
| 41 |
fn=recommend_quote,
|
| 42 |
inputs=[
|
|
|
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, util
import torch
# PEP 8: all imports belong at the top of the module, so `chain` is
# imported here rather than mid-file next to its first use.
from itertools import chain

# Load dataset of motivational quotes (downloads on first run).
dataset = load_dataset("asuender/motivational-quotes", "quotes_extended", split="train")

# Parse data into parallel lists: quotes[i] / authors[i] / tags_list[i]
# all describe the same record.
quotes = [item["quote"] for item in dataset]
authors = [item["author"] for item in dataset]

# Tags are stored as a comma-separated string; convert each record's
# tags to a list (empty string / falsy -> empty list).
tags_list = [item["tags"].split(", ") if item["tags"] else [] for item in dataset]

# Unique, sorted tag vocabulary (used to populate the tag selector).
all_tags = sorted(set(chain.from_iterable(tags_list)))

# Load embedding model and pre-compute quote embeddings once at startup
# so each request only has to embed the user's mood text.
model = SentenceTransformer("all-MiniLM-L6-v2")
quote_embeddings = model.encode(quotes, convert_to_tensor=True)
|
| 23 |
|
| 24 |
+
# Recommendation function
|
| 25 |
def recommend_quote(mood_input, selected_tag):
|
| 26 |
filtered = [(q, a, i) for i, (q, a, t) in enumerate(zip(quotes, authors, tags_list)) if selected_tag in t]
|
| 27 |
if not filtered:
|
|
|
|
| 42 |
|
| 43 |
return result.strip()
|
| 44 |
|
| 45 |
+
# Gradio app
|
| 46 |
iface = gr.Interface(
|
| 47 |
fn=recommend_quote,
|
| 48 |
inputs=[
|