Spaces:
Sleeping
Sleeping
| import os | |
| import time | |
| import gradio as gr | |
| import pandas as pd | |
| import matplotlib.pyplot as plt | |
| import base64 | |
| from io import BytesIO | |
| import requests | |
| from core.dashboard import ErohaDashboard | |
| from core.intelligence import update_memory, summarize_context | |
| from core.selfcheck import evaluate_answer, improve_answer | |
| from core.learning import analyze_user_input, adapt_answer | |
| from core.model_selector import choose_model | |
# ==============================
# 🔐 ENV / module-level configuration
# ==============================
# Hugging Face API token; empty string when the Space has none configured.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
# CSV file where every request/response pair is appended.
LOG_FILE = "logs/history.csv"
# Make sure the log directory exists before anything writes to it.
os.makedirs("logs", exist_ok=True)
# Project-level metrics collector (see core.dashboard).
dashboard = ErohaDashboard()
# ==============================
# 🔁 Automatic Router with failover
# ==============================
def query_huggingface_router(prompt, model_id):
    """Send *prompt* to the HF Router API, falling back to the Inference API.

    Parameters
    ----------
    prompt : str
        Raw user prompt, forwarded verbatim as the model input.
    model_id : str
        Fully-qualified model repo id, e.g. ``"mistralai/Mistral-7B-Instruct-v0.2"``.

    Returns
    -------
    str
        The generated text on success, or a human-readable error string
        (Russian, matching the rest of the UI) when both endpoints fail.
    """
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 600, "temperature": 0.7},
    }

    def _extract_text(body):
        # The Inference API usually returns [{"generated_text": ...}], but
        # some models/tasks return a plain dict — handle both so a
        # successful call never crashes on indexing.
        if isinstance(body, list):
            return body[0]["generated_text"]
        if isinstance(body, dict) and "generated_text" in body:
            return body["generated_text"]
        raise RuntimeError(f"Unexpected response shape: {body!r}")

    try:
        # 1️⃣ Primary Router API
        response = requests.post(
            "https://router.huggingface.co/models/" + model_id,
            headers=headers,
            json=payload,
            timeout=30,
        )
        if response.status_code == 200:
            return _extract_text(response.json())
        elif response.status_code == 410:
            raise RuntimeError("Old endpoint rejected (410)")
        else:
            raise RuntimeError(f"Router error: {response.status_code}")
    except Exception as e:
        print("⚠️ Router недоступен, выполняется fallback:", str(e))
        # 2️⃣ Fallback: query the model directly on the Inference API.
        # BUG FIX: the previous URL (https://huggingface.co/api/models/...)
        # is the read-only model-METADATA endpoint and cannot run inference,
        # so the failover could never produce text. The direct inference
        # endpoint lives on api-inference.huggingface.co.
        fallback_url = f"https://api-inference.huggingface.co/models/{model_id}"
        try:
            response = requests.post(fallback_url, headers=headers, json=payload, timeout=30)
            if response.status_code == 200:
                return _extract_text(response.json())
            else:
                raise RuntimeError(f"Fallback error: {response.status_code}")
        except Exception as e2:
            return f"❌ Ошибка при обращении к API: {str(e2)}"
# ==============================
# 🧠 Main logic
# ==============================
def generate_response(user_input):
    """Run the full agent pipeline for one user prompt.

    Picks a model, queries it through the router (with failover),
    self-checks and personalizes the answer, records the interaction in
    the dashboard and the CSV log, and returns the answer followed by a
    Markdown summary. Any failure is returned as an error string rather
    than raised, so the Gradio callback never crashes.
    """
    try:
        started_at = time.time()

        # Analyze the request and pick the best model for it.
        preferences = analyze_user_input(user_input)
        selected_model = choose_model(user_input)

        # Generate, self-check, then refine/personalize the answer.
        raw_answer = query_huggingface_router(user_input, selected_model)
        quality = evaluate_answer(raw_answer)
        refined = adapt_answer(improve_answer(raw_answer))

        # Persist to long-term memory and pull the running context summary.
        update_memory(user_input, refined)
        memory_context = summarize_context()

        elapsed = round(time.time() - started_at, 2)
        dashboard.log_request(selected_model, preferences["category"], elapsed)

        # Append one row to the CSV history (header only on first write).
        record = {
            "time": time.strftime("%Y-%m-%d %H:%M:%S"),
            "model": selected_model,
            "category": preferences["category"],
            "response_time": elapsed,
            "prompt": user_input,
            "response": refined[:1500],
        }
        pd.DataFrame([record]).to_csv(
            LOG_FILE, mode="a", index=False, header=not os.path.exists(LOG_FILE)
        )

        feedback_line = (
            "; ".join(quality["feedback"]) if quality["feedback"] else "✅ Всё отлично"
        )
        summary = (
            f"🧠 **Модель:** `{selected_model}`\n"
            f"⚡ **Время отклика:** {elapsed} сек\n"
            f"🔍 **Проверка качества:** {quality['result']}\n"
            f"{feedback_line}\n\n"
            f"{memory_context}"
        )
        return f"{refined}\n\n{summary}"
    except Exception as e:
        return f"❌ Ошибка выполнения: {str(e)}"
# ==============================
# 📈 Analytics visualization
# ==============================
def generate_chart():
    """Render recent response times as a base64-embedded ``<img>`` tag.

    Reads the CSV request log, plots the last 50 response times against
    time, and returns an HTML snippet. Returns a warning string when the
    log file is missing or empty.
    """
    if not os.path.exists(LOG_FILE):
        return "⚠️ Нет данных для графика."

    history = pd.read_csv(LOG_FILE)
    if history.empty:
        return "⚠️ История пуста."

    history["time"] = pd.to_datetime(history["time"])
    recent = history.tail(50)

    plt.figure(figsize=(8, 4))
    plt.plot(recent["time"], recent["response_time"], marker="o", linewidth=1.5)
    plt.title("⚡ Скорость отклика моделей (последние 50 запросов)")
    plt.xlabel("Время")
    plt.ylabel("Секунды")
    plt.grid(True)

    # Serialize the figure to PNG in memory and embed it inline as base64.
    png_buffer = BytesIO()
    plt.savefig(png_buffer, format="png", bbox_inches="tight")
    plt.close()
    encoded = base64.b64encode(png_buffer.getvalue()).decode("utf-8")
    return f"<img src='data:image/png;base64,{encoded}'/>"
def show_dashboard():
    """Gather everything the Dashboard tab displays.

    Returns the metrics Markdown, the request-history DataFrame, and the
    response-time chart as HTML — in the order the Gradio outputs expect.
    """
    metrics_md, history_df = dashboard.dashboard_ui()
    return metrics_md, history_df, generate_chart()
# ==============================
# 🎨 Gradio interface
# ==============================
with gr.Blocks(title="Eroha AgentAPI v5.6 — AutoRouter Failover Core", theme="soft") as app:
    gr.Markdown("# 🤖 Eroha AgentAPI v5.6 — Guru Edition (AutoRouter + Failover Core)")
    gr.Markdown("**Автоматический Router + fallback + визуальная аналитика** ⚙️")

    # Chat tab: free-form prompt in, agent answer (with summary) out.
    with gr.Tab("💬 Agent Chat"):
        prompt_box = gr.Textbox(
            label="Введите запрос",
            placeholder="Например: придумай философскую сказку об ИИ.",
        )
        answer_box = gr.Textbox(label="Ответ", lines=15)
        send_button = gr.Button("🚀 Отправить")
        send_button.click(fn=generate_response, inputs=prompt_box, outputs=answer_box)

    # Dashboard tab: aggregate metrics, request history and the latency chart.
    with gr.Tab("📊 Dashboard"):
        metrics_md = gr.Markdown(label="📈 Общая статистика")
        history_table = gr.Dataframe(
            headers=["time", "model", "category", "response_time"],
            label="История запросов",
        )
        chart_html = gr.HTML()
        refresh_button = gr.Button("🔄 Обновить дашборд")
        refresh_button.click(show_dashboard, outputs=[metrics_md, history_table, chart_html])

# Bind to all interfaces on the standard HF Spaces port.
app.launch(server_name="0.0.0.0", server_port=7860)