Update app.py
app.py CHANGED
@@ -3,9 +3,9 @@ import time
 import gradio as gr
 import pandas as pd
 import matplotlib.pyplot as plt
-from huggingface_hub import InferenceClient
 from io import BytesIO
 import base64
+from huggingface_hub import InferenceClient

 from core.dashboard import ErohaDashboard
 from core.intelligence import update_memory, summarize_context
@@ -14,78 +14,75 @@ from core.learning import analyze_user_input, adapt_answer
 from core.model_selector import choose_model

 # ==============================
-# 🔐
+# 🔐 ENV
 # ==============================
-HF_TOKEN = os.getenv("HF_TOKEN")
-
-
+HF_TOKEN = os.getenv("HF_TOKEN", "")
+LOG_FILE = "logs/history.csv"
+os.makedirs("logs", exist_ok=True)
+
+dashboard = ErohaDashboard()

 # ==============================
-#
+# 🔍 Connection type detection
 # ==============================
-
-
-
+def create_inference_client(model_id):
+    """
+    Automatically picks the right API type (Router / Direct / Local).
+    """
+    # Router API
+    if "router.huggingface.co" in model_id or model_id.lower() == "router":
+        print("🔁 Router API mode activated")
+        return InferenceClient(base_url="https://router.huggingface.co", token=HF_TOKEN)
+
+    # Local API
+    elif model_id.startswith("http://") or model_id.startswith("https://"):
+        print("🖥️ Local/Custom API mode activated:", model_id)
+        return InferenceClient(base_url=model_id, token=HF_TOKEN)
+
+    # Direct model
+    else:
+        print("⚙️ Direct model mode activated:", model_id)
+        return InferenceClient(model=model_id, token=HF_TOKEN)

 # ==============================
-#
+# 🧠 Response generation
 # ==============================
 def generate_response(user_input):
     try:
         start = time.time()
-
-        # 1️⃣ Query analysis
         prefs = analyze_user_input(user_input)
         model_id = choose_model(user_input)
+        client = create_inference_client(model_id)

-        # 2️⃣ Router API (new base_url parameter)
-        client = InferenceClient(
-            model=model_id,
-            token=HF_TOKEN,
-            base_url="https://router.huggingface.co"  # ✅ fixed
-        )
-
-        # 3️⃣ Request
         result = client.text_generation(
             user_input,
             max_new_tokens=600,
-            temperature=0.7
+            temperature=0.7,
         )

-        # 4️⃣ Self-review and improvement
         check = evaluate_answer(result)
         improved = improve_answer(result)
         personalized = adapt_answer(improved)
-
-        # 5️⃣ Memory update
         update_memory(user_input, personalized)
         context = summarize_context()

-        # 6️⃣ Metrics logging
         response_time = round(time.time() - start, 2)
         dashboard.log_request(model_id, prefs["category"], response_time)

-
-        log_entry = {
+        df = pd.DataFrame([{
             "time": time.strftime("%Y-%m-%d %H:%M:%S"),
             "model": model_id,
             "category": prefs["category"],
             "response_time": response_time,
             "prompt": user_input,
-            "response": personalized[:
-        }
-        df = pd.DataFrame([log_entry])
-        if os.path.exists(LOG_FILE):
-            df.to_csv(LOG_FILE, mode="a", index=False, header=False)
-        else:
-            df.to_csv(LOG_FILE, index=False)
-
-        # 8️⃣ Output formatting
+            "response": personalized[:1500],
+        }])
+        df.to_csv(LOG_FILE, mode="a", index=False, header=not os.path.exists(LOG_FILE))
+
         summary = (
             f"🧠 **Model:** `{model_id}`\n"
-            f"🧩 **Query type:** {prefs['category']}\n"
             f"⚡ **Response time:** {response_time} sec\n"
-            f"🔍
+            f"🔍 **Quality:** {check['result']}\n"
             f"{'; '.join(check['feedback']) if check['feedback'] else '✅ All good'}\n\n"
             f"{context}"
         )
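The new create_inference_client factory is the heart of this change, so it is worth seeing all three branches fire. Below is a minimal dry-run sketch with a trimmed copy of the branch logic from the hunk above; the three model_id values are made-up examples, and constructing an InferenceClient issues no network request by itself.

# Sketch: exercise the three dispatch branches with hypothetical model IDs.
from huggingface_hub import InferenceClient

HF_TOKEN = ""  # assumption: an empty token is fine for a construction-only test

def create_inference_client(model_id):
    # Trimmed copy of the logic added in the hunk above.
    if "router.huggingface.co" in model_id or model_id.lower() == "router":
        return InferenceClient(base_url="https://router.huggingface.co", token=HF_TOKEN)
    elif model_id.startswith("http://") or model_id.startswith("https://"):
        return InferenceClient(base_url=model_id, token=HF_TOKEN)
    else:
        return InferenceClient(model=model_id, token=HF_TOKEN)

for mid in ["router",                                # Router API branch
            "http://localhost:8080",                 # Local/Custom API branch
            "mistralai/Mistral-7B-Instruct-v0.2"]:   # Direct model branch
    client = create_inference_client(mid)
    print(mid, "->", client)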
@@ -96,22 +93,21 @@ def generate_response(user_input):
         return f"❌ Execution error: {str(e)}"

 # ==============================
-# 📈
+# 📈 Analytics visualization
 # ==============================
 def generate_chart():
     if not os.path.exists(LOG_FILE):
-        return "⚠️
-
+        return "⚠️ No data to display."
     df = pd.read_csv(LOG_FILE)
     if df.empty:
-        return "⚠️
-
+        return "⚠️ History is empty."
+
     df["time"] = pd.to_datetime(df["time"])
-    df = df.tail(
+    df = df.tail(50)

     plt.figure(figsize=(8, 4))
-    plt.plot(df["time"], df["response_time"], marker="o")
-    plt.title("
+    plt.plot(df["time"], df["response_time"], marker="o", linewidth=1.5)
+    plt.title("⚡ Model response speed (last 50 requests)")
     plt.xlabel("Time")
     plt.ylabel("Response time (sec)")
     plt.grid(True)
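Two details in these hunks interlock: the logging in generate_response appends with header=not os.path.exists(LOG_FILE), so the CSV header is written exactly once, and generate_chart later re-parses the time column with pd.to_datetime. A minimal round-trip sketch of that pattern, using a hypothetical demo_history.csv instead of the app's logs/history.csv:

# Sketch: write-once-header append logging, then the read-back generate_chart performs.
import os
import time
import pandas as pd

LOG_FILE = "demo_history.csv"  # hypothetical path, only for this sketch

for model in ["model-a", "model-b"]:
    row = {
        "time": time.strftime("%Y-%m-%d %H:%M:%S"),
        "model": model,
        "response_time": 0.42,
    }
    # The header is emitted only on the first write, while the file does not exist yet.
    pd.DataFrame([row]).to_csv(LOG_FILE, mode="a", index=False,
                               header=not os.path.exists(LOG_FILE))

df = pd.read_csv(LOG_FILE)
df["time"] = pd.to_datetime(df["time"])  # same parse step as generate_chart()
print(df.tail(50))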
@@ -121,26 +117,22 @@ def generate_chart():
     buffer.seek(0)
     img_base64 = base64.b64encode(buffer.read()).decode("utf-8")
     plt.close()
-
     return f"<img src='data:image/png;base64,{img_base64}'/>"

-# ==============================
-# 📊 Dashboard display
-# ==============================
 def show_dashboard():
     metrics_text, df = dashboard.dashboard_ui()
     chart_html = generate_chart()
     return metrics_text, df, chart_html

 # ==============================
-# 🎨
+# 🎨 Gradio Interface
 # ==============================
-with gr.Blocks(title="Eroha AgentAPI v5.
-    gr.Markdown("# 🤖 Eroha AgentAPI v5.
-    gr.Markdown("**Intelligence + self-learning + analytics +
+with gr.Blocks(title="Eroha AgentAPI v5.5 — Adaptive AutoRouter", theme="soft") as app:
+    gr.Markdown("# 🤖 Eroha AgentAPI v5.5 — Guru Edition (AutoRouter Adaptive Core)")
+    gr.Markdown("**Intelligence + self-learning + analytics + API auto-detection** 🧩")

     with gr.Tab("💬 Agent Chat"):
-        user_input = gr.Textbox(label="Enter a query", placeholder="For example:
+        user_input = gr.Textbox(label="Enter a query", placeholder="For example: explain how a neural network works, using metaphors.")
         output_box = gr.Textbox(label="Answer", lines=15)
         submit_btn = gr.Button("🚀 Send")
         submit_btn.click(fn=generate_response, inputs=user_input, outputs=output_box)
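The diff ends inside the gr.Blocks context: the dashboard tab that would consume show_dashboard(), and the final launch call, are not shown here. Purely as a sketch of how its three return values (metrics text, a DataFrame, chart HTML) could be wired up; every widget name below is hypothetical:

# Sketch only: a hypothetical dashboard tab; none of these lines appear in the diff.
import gradio as gr
import pandas as pd

def show_dashboard():
    # Stand-in with the same return shape as the real function:
    # (metrics_text, df, chart_html)
    return "requests: 0", pd.DataFrame(), "<p>no chart yet</p>"

with gr.Blocks() as demo:
    with gr.Tab("📊 Dashboard"):
        metrics_box = gr.Textbox(label="Metrics", lines=8)
        table = gr.Dataframe(label="Request history")
        chart = gr.HTML()
        refresh_btn = gr.Button("🔄 Refresh")
        refresh_btn.click(fn=show_dashboard, inputs=None, outputs=[metrics_box, table, chart])

demo.launch()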