Spaces:
Running
Running
"""
Boolook - voice-based emotion-analysis book recommender (HuggingFace Spaces).

Changes in this revision:
- Embedding loading moved to a background thread (prevents startup timeout)
- Embedding batch size increased to 128 (faster encoding)
- Server starts first; book data loads afterwards in the background
- Recommendation output simplified to JSON
  - emotion_score: a single score for the top emotion
  - uses the user_input / recommendation_books keys
"""
| import gradio as gr | |
| import pandas as pd | |
| import numpy as np | |
| import torch | |
| import pickle | |
| import csv | |
| import json | |
| import threading | |
| import warnings | |
| import logging | |
| from pathlib import Path | |
| from datetime import datetime | |
| from collections import defaultdict | |
| from typing import Dict, List, Tuple | |
| from transformers import pipeline as hf_pipeline | |
| from sentence_transformers import SentenceTransformer, util as sbert_util | |
# Silence noisy third-party warnings; keep our own INFO logs visible.
warnings.filterwarnings("ignore")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ============================================================
# Configuration
# ============================================================
BOOK_DB_PATH = Path("book_db_final.csv")        # book catalogue CSV
FEEDBACK_PATH = Path("user_feedback.csv")       # appended on every feedback click
SBERT_CACHE_PATH = Path("book_embeddings.pkl")  # pickle cache: isbn -> embedding vector
SAMPLE_RATE = 16000                             # nominal audio sample rate (Hz)
MAX_EMBEDDING_BATCH = 128                       # SBERT encode batch size
# HF pipeline device convention: 0 = first CUDA GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1
logger.info(f"λλ°μ΄μ€: {'GPU' if device == 0 else 'CPU'}")
# ============================================================
# Global state (populated by the background loader thread)
# ============================================================
df = pd.DataFrame()                 # book catalogue; set by load_book_data()
book_embeddings = torch.tensor([])  # embedding matrix row-aligned with df
_data_ready = False                 # True once load_book_data() finished
_data_lock = threading.Lock()       # guards the three globals above
# ============================================================
# Model loading (eager, at import time, before the UI starts)
# ============================================================
logger.info("λͺ¨λΈ λ‘λ© μ€...")
# Speech-to-text: Whisper small; chunked so long recordings still transcribe.
stt_model = None
try:
    stt_model = hf_pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-small",
        device=device,
        chunk_length_s=30,
    )
    logger.info("STT λͺ¨λΈ λ‘λ μλ£ (whisper-small)")
except Exception as e:
    logger.error(f"STT λ‘λ μ€ν¨: {e}")
# Korean sentence embeddings; used for both emotion scoring and book matching.
sbert_model = None
try:
    sbert_model = SentenceTransformer("jhgan/ko-sroberta-multitask")
    sbert_model.max_seq_length = 384
    if torch.cuda.is_available():
        sbert_model = sbert_model.to("cuda")
    logger.info("SBERT λͺ¨λΈ λ‘λ μλ£")
except Exception as e:
    logger.error(f"SBERT λ‘λ μ€ν¨: {e}")
# Optional audio emotion classifier; the app degrades to text-only when absent.
audio_emotion_pipeline = None
try:
    audio_emotion_pipeline = hf_pipeline(
        "audio-classification",
        model="superb/wav2vec2-base-superb-er",
        device=device,
    )
    logger.info("μμ± κ°μ λͺ¨λΈ λ‘λ μλ£")
except Exception as e:
    logger.warning(f"μμ± κ°μ λͺ¨λΈ μ€ν΅ (ν μ€νΈλ§ μ¬μ©): {e}")
logger.info("λͺ¨λΈ λ‘λ© μλ£!")
# ============================================================
# Emotion labels & descriptions
# ============================================================
# label -> short description; the descriptions are embedded once with SBERT
# and compared against the user's utterance by cosine similarity.
# NOTE(review): the Korean text below is mojibake in this copy of the file —
# it is runtime data, so it is preserved byte-for-byte here.
_EMOTION_DESCS = {
    "κΈ°μ¨": "ν볡νκ³ μ¦κ²κ³ μ μΎν κΈ°λΆ",
    "μ λ’°": "λ°λ»νκ³ μμ μ μ΄λ©° κ°μ‘±κ³Ό μ°μ κ°μ μ λκ°",
    "곡ν¬": "무μκ³ κΈ΄μ₯λλ©° μ€λ¦΄ μλ 곡ν¬μ λΆμ",
    "λλ": "λ°μ κ³Ό 좩격, μμμΉ λͺ»ν κ²½μ΄λ‘μ",
    "μ¬ν": "μ¬νκ³ μΈλ‘κ³ μ΄λ³κ³Ό μμ€μ κ°μ ",
    "νμ€": "λΆμ‘°λ¦¬μ λΆνλ±, μμ μ λν λΉνκ³Ό νμ",
    "λΆλ Έ": "λΆλ Έμ μ ν, ν¬μκ³Ό κ°λ±",
    "κΈ°λ": "μ±μ₯κ³Ό λμ , λͺ¨νκ³Ό ν¬λ§",
}
_EMOTION_LABELS = list(_EMOTION_DESCS.keys())
# Pre-computed embeddings of the label descriptions; stays None if SBERT
# failed to load, which disables text emotion scoring.
_LABEL_EMBS = None
if sbert_model:
    try:
        _LABEL_EMBS = sbert_model.encode(
            list(_EMOTION_DESCS.values()),
            convert_to_tensor=True,
            show_progress_bar=False,
        )
    except Exception as e:
        logger.error(f"κ°μ λ μ΄λΈ μλ² λ© μ€ν¨: {e}")
# Maps the audio classifier's 4 output labels onto our emotion set.
_AUDIO_LABEL_MAP = {"hap": "κΈ°μ¨", "neu": "μ λ’°", "sad": "μ¬ν", "ang": "λΆλ Έ"}
# Keyword hints: if any keyword appears in the transcript, that emotion's
# score receives a flat +0.15 boost (see text_emotion_scores).
_KEYWORD_BOOSTS = {
    "μ¬ν": ["μ¬ν", "μ°μΈ", "λλ¬Ό", "νλ€", "μΈλ‘"],
    "λΆλ Έ": ["νλ", "μ§μ¦", "μ΄λ°", "λΉ‘μΉ", "μ΅μΈ"],
    "κΈ°μ¨": ["ν볡", "μ’λ€", "κΈ°μ", "μ¦κ²", "μ λ"],
    "곡ν¬": ["무μ", "λλ ΅", "κ±±μ ", "λΆμ"],
    "λλ": ["λλ", "κΉμ§", "좩격"],
    "μ λ’°": ["λ―Ώμ", "μ¬λ", "λ°λ»", "κ³ λ§"],
    "κΈ°λ": ["κΈ°λ", "ν¬λ§", "μ€λ "],
}
| # ============================================================ | |
| # μΈμ νΌλλ°± | |
| # ============================================================ | |
| class SessionFeedback: | |
| def __init__(self): | |
| self.accepted_counts = defaultdict(int) | |
| self.rejected_counts = defaultdict(int) | |
| def score_multiplier(self, emotion: str) -> float: | |
| acc = self.accepted_counts.get(emotion, 0) | |
| rej = self.rejected_counts.get(emotion, 0) | |
| return max(0.5, min(2.0, 1.0 + (0.1 * acc) - (0.1 * rej))) | |
| _session = SessionFeedback() | |
# ============================================================
# Background data loader
# ============================================================
def load_book_data():
    """Load the book CSV and its SBERT embedding matrix into module globals.

    Runs on a daemon thread so the Gradio server can start immediately.
    On success, publishes ``df``, ``book_embeddings`` and ``_data_ready``
    under ``_data_lock``; on CSV failure it returns without publishing.
    """
    global df, book_embeddings, _data_ready
    if not BOOK_DB_PATH.exists():
        logger.error(f"{BOOK_DB_PATH} νμΌμ΄ μμ΅λλ€.")
        return
    try:
        _df = pd.read_csv(BOOK_DB_PATH, encoding="utf-8-sig").fillna("")
        logger.info(f"{len(_df)}κΆ λ‘λ μλ£")
    except Exception as e:
        logger.error(f"CSV λ‘λ μ€ν¨: {e}")
        return
    # Reuse embeddings already cached on disk (isbn -> vector).
    emb_cache = {}
    if SBERT_CACHE_PATH.exists():
        try:
            with open(SBERT_CACHE_PATH, "rb") as f:
                emb_cache = pickle.load(f)
            logger.info(f"μλ² λ© μΊμ: {len(emb_cache)}κ°")
        except Exception as e:
            logger.warning(f"μΊμ λ‘λ μ€ν¨: {e}")
    # Encode only the rows missing from the cache, in fixed-size batches.
    missing = [i for i, row in _df.iterrows() if str(row["isbn"]) not in emb_cache]
    if missing and sbert_model:
        logger.info(f"μ κ· μλ² λ© κ³μ°: {len(missing)}κΆ")
        try:
            for start in range(0, len(missing), MAX_EMBEDDING_BATCH):
                batch = missing[start:start + MAX_EMBEDDING_BATCH]
                # Document text: title + description, truncated to 500 chars.
                texts = [
                    (str(_df.at[i, "title"]) + " " + str(_df.at[i, "content"]))[:500]
                    for i in batch
                ]
                vecs = sbert_model.encode(
                    texts, convert_to_tensor=False, show_progress_bar=False,
                    batch_size=MAX_EMBEDDING_BATCH,
                )
                for i, vec in zip(batch, vecs):
                    emb_cache[str(_df.at[i, "isbn"])] = vec
                # Progress log every 10 batches.
                if (start // MAX_EMBEDDING_BATCH) % 10 == 0:
                    logger.info(f" μ§ν: {start}/{len(missing)}")
            with open(SBERT_CACHE_PATH, "wb") as f:
                pickle.dump(emb_cache, f)
            logger.info("μλ² λ© μ μ₯ μλ£")
        except Exception as e:
            logger.error(f"μλ² λ© κ³μ° μ€ν¨: {e}")
    # Stack per-book vectors into one matrix aligned with _df's row order;
    # books with no cached vector get a zero vector (384 = model dim).
    try:
        emb_matrix = np.stack([
            emb_cache.get(str(row["isbn"]), np.zeros(384))
            for _, row in _df.iterrows()
        ])
        _book_emb = torch.tensor(emb_matrix, dtype=torch.float32)
        if torch.cuda.is_available():
            _book_emb = _book_emb.to("cuda")
    except Exception as e:
        logger.error(f"μλ² λ© νλ ¬ μμ± μ€ν¨: {e}")
        _book_emb = torch.tensor([])
    # Publish under the lock so readers never observe a half-loaded state.
    with _data_lock:
        df = _df
        book_embeddings = _book_emb
        _data_ready = True
    logger.info("λ°±κ·ΈλΌμ΄λ λ°μ΄ν° λ‘λ μλ£!")


# Kick off loading immediately; daemon thread won't block interpreter exit.
threading.Thread(target=load_book_data, daemon=True).start()
# ============================================================
# Emotion analysis
# ============================================================
def text_emotion_scores(text: str) -> Dict[str, float]:
    """Score each emotion label for *text*.

    Cosine similarity between the SBERT embedding of *text* and each label
    description, plus a flat +0.15 keyword boost per matching emotion,
    normalized to sum to 1 when the total is positive. Returns all zeros
    when *text* is empty or the SBERT model is unavailable.
    """
    scores = dict.fromkeys(_EMOTION_LABELS, 0.0)
    if not text or not sbert_model or _LABEL_EMBS is None:
        return scores
    try:
        query_emb = sbert_model.encode(text, convert_to_tensor=True, show_progress_bar=False)
        sims = sbert_util.cos_sim(query_emb, _LABEL_EMBS)[0]
        scores = {label: float(sims[i].item()) for i, label in enumerate(_EMOTION_LABELS)}
    except Exception as e:
        logger.error(f"ν μ€νΈ κ°μ μ€ν¨: {e}")
    lowered = text.lower()
    for emotion, keywords in _KEYWORD_BOOSTS.items():
        if any(kw in lowered for kw in keywords):
            scores[emotion] += 0.15
    total = sum(scores.values())
    if total > 0:
        return {label: value / total for label, value in scores.items()}
    return scores
def audio_emotion_scores(audio_array: np.ndarray, sr: int) -> Dict[str, float]:
    """Classify emotion from raw audio via the wav2vec2 pipeline.

    Writes the (assumed [-1, 1]-normalized) samples to a temporary WAV file,
    runs the audio-classification pipeline on it, and maps the classifier's
    labels onto our emotion set. Returns a dict over _EMOTION_LABELS; all
    zeros when the audio model is unavailable or classification fails.
    """
    scores = {emo: 0.0 for emo in _EMOTION_LABELS}
    if audio_emotion_pipeline is None:
        return scores
    tmp_path = None
    try:
        import scipy.io.wavfile as wav_io
        import tempfile
        # Close the file before handing it to the pipeline: reading a file
        # that is still open for writing fails on Windows.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            tmp_path = tmp.name
            wav_io.write(tmp_path, sr, (audio_array * 32767).astype(np.int16))
        results = audio_emotion_pipeline(tmp_path)
        for item in results:
            mapped = _AUDIO_LABEL_MAP.get(item["label"])
            if mapped:
                scores[mapped] += item["score"]
    except Exception as e:
        logger.warning(f"μμ± κ°μ μ€ν¨: {e}")
    finally:
        # BUG FIX: previously the temp file leaked whenever the pipeline
        # raised, because unlink only ran on the success path.
        if tmp_path:
            Path(tmp_path).unlink(missing_ok=True)
    return scores
def fused_emotion(t_scores: Dict[str, float], a_scores: Dict[str, float]) -> Tuple[str, Dict[str, float]]:
    """Fuse text (70%) and audio (30%) emotion scores.

    Audio scores are normalized by their maximum before weighting; when
    every audio score is zero the text scores pass through unchanged.
    Returns (top emotion label, combined score dict).
    """
    audio_present = any(v != 0 for v in a_scores.values())
    if not audio_present:
        combined = t_scores
    else:
        peak = max(a_scores.values()) or 1.0
        combined = {}
        for emo in _EMOTION_LABELS:
            normalized_audio = a_scores[emo] / peak
            combined[emo] = (t_scores[emo] * 0.7) + (normalized_audio * 0.3)
    top_emotion = max(combined, key=combined.get)
    return top_emotion, combined
# ============================================================
# Recommendation
# ============================================================
def get_recommendations(user_input: str, emotion: str, top_n: int = 3) -> List[Dict]:
    """Rank books by similarity to *user_input*, weighted by feedback.

    Final score = cosine(user, book) * session multiplier for *emotion*
    + 0.1 * persisted feedback weight for (emotion, title). Returns at
    most *top_n* book dicts; empty list when data isn't ready or on error.
    """
    # Snapshot the shared state under the lock so the loader thread can't
    # swap it mid-read.
    with _data_lock:
        ready, catalogue, emb_matrix = _data_ready, df, book_embeddings
    if not ready or sbert_model is None or catalogue.empty or len(emb_matrix) == 0:
        return []
    try:
        session_weight = _session.score_multiplier(emotion)
        query_vec = sbert_model.encode(user_input, convert_to_tensor=True, show_progress_bar=False)
        sims = sbert_util.cos_sim(query_vec, emb_matrix)[0]
        if torch.cuda.is_available():
            sims = sims.cpu()
        sims = sims.numpy()
        boosts = _load_feedback_weights()
        scored = []
        for pos, (_, row) in enumerate(catalogue.iterrows()):
            if pos >= len(sims):
                break
            feedback_boost = boosts.get((emotion, str(row["title"])), 0) * 0.1
            final_score = float(sims[pos]) * session_weight + feedback_boost
            scored.append({
                "isbn": str(row.get("isbn", "")),
                "title": str(row.get("title", "")),
                "author": str(row.get("author", "-")),
                "publisher": str(row.get("publisher", "-")),
                "content": str(row.get("content", ""))[:150],
                "img_url": str(row.get("img_url", "")),
                "score": round(final_score, 3),
            })
        scored.sort(key=lambda entry: entry["score"], reverse=True)
        return scored[:top_n]
    except Exception as e:
        logger.error(f"μΆμ² μ€ν¨: {e}")
        return []
| # ============================================================ | |
| # μΆμ² κ²°κ³Ό β JSON λ λλ§ | |
| # ============================================================ | |
| def _render_books_json(user_input: str, emotion: str, combined: Dict[str, float], books: List[Dict]) -> str: | |
| if not books: | |
| return json.dumps({"error": "μΆμ²ν μ± μ μ°Ύμ§ λͺ»νμ΅λλ€."}, ensure_ascii=False, indent=2) | |
| output = { | |
| "user_input": user_input, | |
| "emotion": emotion, | |
| "emotion_score": round(combined.get(emotion, 0.0), 3), | |
| "recommendation_books": [ | |
| { | |
| "isbn": b["isbn"], | |
| "title": b["title"], | |
| "author": b["author"], | |
| "publisher": b["publisher"], | |
| "content": b["content"], | |
| "img_url": b["img_url"], | |
| } | |
| for b in books | |
| ], | |
| } | |
| return json.dumps(output, ensure_ascii=False, indent=2) | |
# ============================================================
# Feedback
# ============================================================
def _load_feedback_weights() -> Dict[Tuple[str, str], float]:
    """Aggregate persisted feedback into (emotion, title) -> weight.

    Each acceptance contributes +1.0, each rejection -0.5. Returns an empty
    dict when the feedback file is missing or unreadable (best-effort).
    """
    if not FEEDBACK_PATH.exists():
        return {}
    try:
        records = pd.read_csv(FEEDBACK_PATH, encoding="utf-8-sig", on_bad_lines="skip")
        weights: Dict[Tuple[str, str], float] = {}
        for _, rec in records.iterrows():
            key = (str(rec.get("emotion", "")), str(rec.get("title", "")))
            delta = 1.0 if int(rec.get("accepted", 0)) == 1 else -0.5
            weights[key] = weights.get(key, 0) + delta
        return weights
    except Exception:
        # Best-effort: malformed feedback must never break recommendations.
        return {}
def save_feedback_csv(isbn: str, title: str, emotion: str, accepted: int, rank: int):
    """Append one feedback row to FEEDBACK_PATH and update session tallies.

    *accepted* is 1 for a like and 0 for a dislike; *rank* is 1-based.
    Failures are logged, never raised (feedback must not break the UI).
    """
    try:
        # Newlines in titles would corrupt the CSV row; flatten them.
        safe_title = title.replace("\n", " ").replace("\r", " ")
        record = {
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "isbn": isbn,
            "title": safe_title,
            "emotion": emotion,
            "accepted": accepted,
            "rank": rank,
        }
        # Write the header only on first creation of the file.
        needs_header = not FEEDBACK_PATH.exists()
        pd.DataFrame([record]).to_csv(
            FEEDBACK_PATH,
            mode="a",
            index=False,
            header=needs_header,
            encoding="utf-8-sig",
            quoting=csv.QUOTE_ALL,
        )
        tally = _session.accepted_counts if accepted == 1 else _session.rejected_counts
        tally[emotion] += 1
    except Exception as e:
        logger.error(f"νΌλλ°± μ μ₯ μ€ν¨: {e}")
def get_feedback_stats() -> str:
    """Render per-emotion feedback counts and acceptance rates as Markdown."""
    if not FEEDBACK_PATH.exists():
        return "π μμ§ νΌλλ°±μ΄ μμ΅λλ€."
    try:
        records = pd.read_csv(FEEDBACK_PATH, encoding="utf-8-sig", on_bad_lines="skip")
        total = len(records)
        if total == 0:
            return "π μμ§ νΌλλ°±μ΄ μμ΅λλ€."
        # Per emotion: total rows ("count") and number accepted ("sum").
        summary = records.groupby("emotion")["accepted"].agg(["count", "sum"])
        parts = [f"**μ΄ νΌλλ°±: {total}건**\n"]
        for emo, stats in summary.iterrows():
            count = int(stats["count"])
            accepted = int(stats["sum"])
            rate = (accepted / count * 100) if count > 0 else 0
            parts.append(f"- {emo}: {count}건 (μλ½λ₯ {rate:.0f}%)")
        return "\n".join(parts)
    except Exception as e:
        return f"ν΅κ³ λ‘λ μ€ν¨: {e}"
# ============================================================
# Main processing
# ============================================================
def process_voice(audio_input):
    """Full pipeline: STT -> emotion scoring/fusion -> recommendation -> JSON.

    *audio_input* is Gradio's numpy audio tuple ``(sample_rate, samples)``
    or None. Returns ``(json_str, books_list, top_emotion)``; on any failure
    the JSON carries an "error" key and the other two values are empty.
    """
    if not _data_ready:
        return json.dumps({"error": "λμ λ°μ΄ν° λ‘λ© μ€μ λλ€. μ μ ν λ€μ μλν΄μ£ΌμΈμ."}, ensure_ascii=False, indent=2), [], ""
    if audio_input is None:
        return json.dumps({"error": "μμ±μ λ Ήμν΄μ£ΌμΈμ."}, ensure_ascii=False, indent=2), [], ""
    if stt_model is None:
        return json.dumps({"error": "STT λͺ¨λΈμ΄ λ‘λλμ§ μμμ΅λλ€."}, ensure_ascii=False, indent=2), [], ""
    try:
        sr, y = audio_input
        if len(y) == 0:
            return json.dumps({"error": "μμ±μ΄ λ무 μ§§μ΅λλ€."}, ensure_ascii=False, indent=2), [], ""
        y = y.astype(np.float32)
        # BUG FIX: Gradio microphones can deliver stereo arrays of shape
        # (n, channels); Whisper and the audio classifier expect mono 1-D
        # audio, so average the channels before further processing.
        if y.ndim > 1:
            y = y.mean(axis=1)
        # Peak-normalize to [-1, 1] (also what audio_emotion_scores assumes).
        max_v = np.max(np.abs(y))
        if max_v > 0:
            y = y / max_v
        stt_result = stt_model({"sampling_rate": sr, "raw": y})
        user_input = stt_result["text"].strip()
        if not user_input:
            return json.dumps({"error": "μμ±μ΄ μΈμλμ§ μμμ΅λλ€."}, ensure_ascii=False, indent=2), [], ""
        t_scores = text_emotion_scores(user_input)
        a_scores = audio_emotion_scores(y, sr)
        top_label, combined = fused_emotion(t_scores, a_scores)
        books = get_recommendations(user_input, top_label, top_n=3)
        books_json = _render_books_json(user_input, top_label, combined, books)
        return books_json, books, top_label
    except Exception as e:
        logger.error(f"μ²λ¦¬ μ€λ₯: {e}")
        return json.dumps({"error": str(e)}, ensure_ascii=False, indent=2), [], ""
def on_feedback(books_state: list, emotion: str, rank_str: str, liked: bool):
    """Persist a like/dislike for the book at 1-based rank *rank_str*.

    Returns a user-facing status message; any error (bad rank, missing
    state) is reported as text rather than raised into the UI.
    """
    try:
        idx = int(rank_str) - 1
        if not books_state or not (0 <= idx < len(books_state)):
            return "μ± μ λ¨Όμ μΆμ²λ°μμ£ΌμΈμ."
        chosen = books_state[idx]
        save_feedback_csv(chosen["isbn"], chosen["title"], emotion, 1 if liked else 0, idx + 1)
        icon = "π" if liked else "π"
        return f"{icon} '{chosen['title']}' νΌλλ°±μ΄ μ μ₯λμμ΅λλ€!"
    except Exception as e:
        return f"νΌλλ°± μ μ₯ μ€ν¨: {e}"
def run_analysis(audio):
    """Gradio click handler: delegates to process_voice() and returns its
    (json_str, books, emotion) tuple unchanged."""
    return process_voice(audio)
# ============================================================
# Gradio UI
# ============================================================
with gr.Blocks(theme=gr.themes.Soft(), title="Boolook π") as demo:
    gr.Markdown("""
    # π Boolook β μμ± κΈ°λ° κ°μ λΆμ μ± μΆμ²
    λΉμ μ κ°μ μ λ§λ‘ νννλ©΄, AIκ° λ± λ§λ μ± μ μΆμ²ν΄λ립λλ€.
    π€ **μ¬μ©λ²:** λ§μ΄ν¬λ‘ κ°μ νν β λΆμνκΈ° β νΌλλ°± λ¨κΈ°κΈ°
    """)
    # Cross-callback state: last recommendation list and detected emotion.
    state_books = gr.State([])
    state_emotion = gr.State("")
    # Left column: microphone input; right column: JSON result viewer.
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π€ μμ± μ λ ₯")
            audio_in = gr.Audio(sources=["microphone"], type="numpy", label="λ§μ΄ν¬λ‘ κ°μ νννκΈ°")
            analyze_btn = gr.Button("π λΆμνκΈ°", variant="primary", size="lg")
            gr.Markdown("π‘ μ: 'μ€λ λ무 μ¬νΌμ', 'ν볡ν κΈ°λΆμ΄μμ'")
        with gr.Column(scale=1):
            out_books_json = gr.Code(
                label="π λΆμ κ²°κ³Ό & π μΆμ² λμ",
                language="json",
                interactive=False,
            )
    # Feedback panel: pick a rank (1-3) and like/dislike it.
    with gr.Accordion("π¬ νΌλλ°±", open=True):
        gr.Markdown("μΆμ²λ°μ μ± μ νκ°λ₯Ό λ¨κ²¨μ£ΌμΈμ!")
        with gr.Row():
            rank_radio = gr.Radio(["1", "2", "3"], label="μ± λ²νΈ", value="1")
            like_btn = gr.Button("π μ’μμ", variant="primary")
            dislike_btn = gr.Button("π μ«μ΄μ", variant="secondary")
        feedback_out = gr.Textbox(label="νΌλλ°± κ²°κ³Ό", interactive=False)
    # Collapsible stats panel, refreshed on demand.
    with gr.Accordion("π ν΅κ³", open=False):
        stats_md = gr.Markdown("μλ‘κ³ μΉ¨μ λλ¬μ£ΌμΈμ.")
        refresh_btn = gr.Button("π ν΅κ³ μλ‘κ³ μΉ¨")
        refresh_btn.click(fn=get_feedback_stats, outputs=stats_md)
    # Wire the analyze action and the two feedback buttons.
    analyze_btn.click(
        fn=run_analysis,
        inputs=audio_in,
        outputs=[out_books_json, state_books, state_emotion],
    )
    like_btn.click(
        fn=lambda b, e, r: on_feedback(b, e, r, True),
        inputs=[state_books, state_emotion, rank_radio],
        outputs=feedback_out,
    )
    dislike_btn.click(
        fn=lambda b, e, r: on_feedback(b, e, r, False),
        inputs=[state_books, state_emotion, rank_radio],
        outputs=feedback_out,
    )
if __name__ == "__main__":
    demo.launch()