# Hugging Face Space: sentiment-analysis demo (page header "Spaces: Sleeping" removed — scrape residue, not code)
import gradio as gr
import time
import pandas as pd
from transformers import pipeline

# Maximum number of characters accepted per request (longer input is rejected).
MAX_CHARS = 1500
# Number of entries kept in the rolling request history shown in the UI.
History_size = 5
# UI dropdown label -> Hugging Face model id.
MODELS = {
    "Русский (rubert-tiny)": "cointegrated/rubert-tiny-sentiment-balanced",
    "Twitter-roBERTa": "cardiffnlp/twitter-roberta-base-sentiment-latest"
}
# Cache of instantiated pipelines, keyed by MODELS key (populated lazily by get_pipe).
_pipes = {}
# Lazily build and cache the sentiment pipeline for the selected model.
def get_pipe(key):
    """Return the transformers pipeline registered under *key*, creating it on first use."""
    pipe = _pipes.get(key)
    if pipe is None:
        pipe = pipeline("sentiment-analysis", model=MODELS[key])
        _pipes[key] = pipe
    return pipe
# Single-text sentiment analysis with a rolling request history.
def analyze_text(text, model_key, history):
    """Classify *text* with the model selected by *model_key*.

    Parameters
    ----------
    text : str | None
        Raw user input; rejected when empty/whitespace or longer than MAX_CHARS.
    model_key : str
        Key into MODELS choosing the pipeline.
    history : list[str] | None
        Rolling log of previous requests (backed by gr.State).

    Returns
    -------
    (label, score, latency, history). On validation or model errors the first
    element carries the error message, score is "" and latency is 0.0.
    """
    start = time.time()
    if text is None or not text.strip():
        return "Ошибка: пустой ввод", "", 0.0, history
    text = text.strip()
    if len(text) > MAX_CHARS:
        return "Ошибка: текст слишком длинный", "", 0.0, history
    try:
        clf = get_pipe(model_key)
        res = clf(text)[0]
        label = res["label"]
        score = round(float(res["score"]), 3)
    except Exception as e:  # surface model/load failures in the UI instead of crashing the handler
        return f"Ошибка модели: {e}", "", 0.0, history
    latency = round(time.time() - start, 3)
    # Bug fix: add the ellipsis only when the preview is actually truncated;
    # the original appended "..." even to texts shorter than 80 chars.
    preview = text[:80] + ("..." if len(text) > 80 else "")
    record = f"[{model_key}] {preview} → {label} ({score}), {latency}s"
    if not history:
        history = []
    # Keep at most History_size entries: drop the oldest before appending the new record.
    history = history[-(History_size - 1):]
    history.append(record)
    return label, score, latency, history
# Batch sentiment analysis over an uploaded .txt (one text per line) or .csv file.
def obr_file(file_obj, model_key):
    """Classify every non-empty line/row of the uploaded file.

    Parameters
    ----------
    file_obj : uploaded file (gradio object exposing a ``.name`` path) or None
    model_key : str
        Key into MODELS choosing the pipeline.

    Returns
    -------
    pd.DataFrame with columns text/label/score, or a single-column "error"
    frame describing what went wrong.
    """
    if file_obj is None:
        return pd.DataFrame({"error": ["Файл не загружен"]})
    path = file_obj.name.lower()
    if not path.endswith((".txt", ".csv")):
        return pd.DataFrame({"error": ["Поддерживаются только .txt и .csv"]})
    try:
        if path.endswith(".txt"):
            with open(file_obj.name, "r", encoding="utf-8", errors="ignore") as f:
                texts = [line.strip() for line in f.read().splitlines() if line.strip()]
        else:
            df = pd.read_csv(file_obj.name)
            # Prefer a "text" column; otherwise fall back to the first column.
            col = "text" if "text" in df.columns else df.columns[0]
            texts = df[col].astype(str).tolist()
    except Exception as e:
        return pd.DataFrame({"error": [f"Ошибка чтения файла: {e}"]})
    # Robustness fix: an empty file previously produced a column-less DataFrame.
    if not texts:
        return pd.DataFrame(columns=["text", "label", "score"])
    texts = [t[:MAX_CHARS] for t in texts]
    # Fixes: (1) inference was unguarded — a model failure crashed the handler,
    # unlike the guarded file-reading path above; (2) one batched pipeline call
    # replaces the per-row Python loop.
    try:
        clf = get_pipe(model_key)
        results = clf(texts)
    except Exception as e:
        return pd.DataFrame({"error": [f"Ошибка модели: {e}"]})
    rows = [
        {"text": t, "label": r["label"], "score": round(float(r["score"]), 3)}
        for t, r in zip(texts, results)
    ]
    return pd.DataFrame(rows)
# --- Gradio UI wiring ------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("🧠 Sentiment Analysis")
    # Single-text analysis: input textbox + model selector side by side.
    with gr.Row():
        text_input = gr.Textbox(label="Введите текст", lines=5)
        model_choice = gr.Dropdown(list(MODELS.keys()), value="Русский (rubert-tiny)", label="Модель")
    btn = gr.Button("Обработать")
    # Result fields: label, confidence score, and response latency.
    with gr.Row():
        lab = gr.Textbox(label="Тональность")
        scr = gr.Textbox(label="Уверенность")
        lat = gr.Textbox(label="Время ответа (сек)")
    # Per-session rolling history (list[str]) rendered into the textbox below.
    state = gr.State([])
    box = gr.Textbox(label="История запросов (последние 5)", lines=5)
    # Run the model first, then render the updated history list as text.
    btn.click(
        analyze_text,
        inputs=[text_input, model_choice, state],
        outputs=[lab, scr, lat, state]
    ).then(
        lambda h: "\n".join(h),
        inputs=state,
        outputs=box
    )
    # Batch-processing section: upload a file, show results in a table.
    gr.Markdown("# Пакетная обработка (TXT/CSV)")
    file_in = gr.File(label="Загрузите файл (.txt или .csv)")
    bbtn = gr.Button("Обработать файл")
    obtn = gr.Dataframe(label="Результаты")
    bbtn.click(obr_file, inputs=[file_in, model_choice], outputs=obtn)
    # Clickable example inputs that fill the text/model widgets.
    gr.Examples(
        examples=[
            ["Мне очень понравился этот фильм, всё отлично!", "Русский (rubert-tiny)"],
            ["Это худший опыт в моей жизни.", "Русский (rubert-tiny)"],
            ["The app is amazing!", "Twitter-roBERTa"],
            ["This is terrible and buggy.", "Twitter-roBERTa"]
        ],
        inputs=[text_input, model_choice],
        label="Примеры"
    )

demo.launch()