Update app.py
app.py
CHANGED
@@ -1,117 +1,93 @@
-import streamlit as st
+import chainlit as cl
 import os
 from huggingface_hub import InferenceClient
 from qdrant_client import QdrantClient
 from sentence_transformers import SentenceTransformer
 
 # --- SETTINGS ---
-st.set_page_config(page_title="Sales AI + RAG", page_icon="💼", layout="centered")
-st.title("💼 Virtual Sales Department")
-st.caption("Chat with a knowledge base (RAG) on Qwen 2.5")
-
-# --- KEYS ---
 HF_TOKEN = os.getenv("HF_TOKEN")
 QDRANT_URL = os.getenv("QDRANT_URL")
 QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
+MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
 
+# --- INITIALIZATION ON CHAT START ---
+@cl.on_chat_start
+async def start():
+    # 1. Show a loading message
+    msg = cl.Message(content="Loading the knowledge base and connecting to the models...")
+    await msg.send()
 
-def load_resources():
-    # 1. Chat model
-    hf_client = InferenceClient("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
+    # 2. Initialize the clients (stored in the user session)
+    hf_client = InferenceClient(MODEL_ID, token=HF_TOKEN)
 
-    # 2. Knowledge base (Qdrant)
     q_client = None
     if QDRANT_URL and QDRANT_API_KEY:
         try:
             q_client = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
-            print("✅ Qdrant
-        except
-            print(
+            print("✅ Qdrant OK")
+        except Exception:
+            print("❌ Qdrant Error")
 
-    # 3. Embedding model for search
     encoder = SentenceTransformer('all-MiniLM-L6-v2')
 
+    # Store everything in the session so each message handler can reuse it
+    cl.user_session.set("hf_client", hf_client)
+    cl.user_session.set("q_client", q_client)
+    cl.user_session.set("encoder", encoder)
 
+    # 3. Update the loading message to a greeting
+    msg.content = "👋 Hi! I'm ready to go. Ask me a question about the knowledge base."
+    await msg.update()
 
-# --- SEARCH FUNCTION ---
-def get_context(query):
-    if not qdrant:
-        return ""
+# --- RAG (SEARCH) FUNCTION ---
+def get_context(query, q_client, encoder):
+    if not q_client:
+        return ""
     try:
-        # Turn the question into a vector
         vector = encoder.encode(query).tolist()
-
-        # Search the "sales_knowledge" collection (we will create it later)
-        search_result = qdrant.search(
+        hits = q_client.search(
             collection_name="sales_knowledge",
             query_vector=vector,
             limit=3
         )
-    except Exception as e:
-        print(f"Search error: {e}")
-        return ""
+        return "\n".join([hit.payload.get("text", "") for hit in hits])
+    except Exception:
+        return ""
 
-# --- CHAT ---
-if "messages" not in st.session_state:
-    st.session_state.messages = []
 
+# --- MESSAGE HANDLING ---
+@cl.on_message
+async def main(message: cl.Message):
+    # Pull the tools out of the session
+    hf_client = cl.user_session.get("hf_client")
+    q_client = cl.user_session.get("q_client")
+    encoder = cl.user_session.get("encoder")
 
-    # 1. Save the user's question
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    with st.chat_message("user"):
-        st.markdown(prompt)
-
-    # 2. LOOK UP CONTEXT IN QDRANT
-    context = get_context(prompt)
+    # 1. Look up context via RAG
+    context = get_context(message.content, q_client, encoder)
 
+    # 2. Build the prompt
+    system_prompt = "You are a helpful assistant. Answer in Russian."
     if context:
+        system_prompt += f"\n\nUse this information to answer:\n{context}"
+        # Optionally, show the retrieved context in the UI:
+        # await cl.Message(content=f"📚 Found in the knowledge base:\n{context[:100]}...").send()
 
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": message.content}
+    ]
 
-    full_response = ""
-
+    # 3. Send to Qwen and stream the reply
+    msg = cl.Message(content="")
+    await msg.send()  # create an empty message bubble
 
-    try:
-        stream = client.chat_completion(
-            messages=api_messages,
-            max_tokens=512,
-            stream=True,
-            temperature=0.7
-        )
-
-        for chunk in stream:
-            content = chunk.choices[0].delta.content
-            if content:
-                full_response += content
-                message_placeholder.markdown(full_response + "▌")
-
-        message_placeholder.markdown(full_response)
-
-    except Exception as e:
-        st.error(f"API error: {e}")
+    try:
+        stream = hf_client.chat_completion(messages, max_tokens=1024, stream=True, temperature=0.7)
+
+        for chunk in stream:
+            token = chunk.choices[0].delta.content
+            if token:
+                await msg.stream_token(token)
+
+        await msg.update()  # finalize the message
+
+    except Exception as e:
+        await cl.Message(content=f"Error: {str(e)}").send()
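
Note that get_context assumes a Qdrant collection named "sales_knowledge" already exists and that each stored point carries a "text" payload field; the removed Streamlit version's comment likewise said the collection would be created later. Below is a minimal one-off seeding sketch under those assumptions. The file name, sample documents, point IDs, and the cosine metric are illustrative and not part of this commit.

# seed_qdrant.py: hypothetical one-off script to create and fill the collection
import os

from qdrant_client import QdrantClient
from qdrant_client.models import Distance, PointStruct, VectorParams
from sentence_transformers import SentenceTransformer

client = QdrantClient(url=os.getenv("QDRANT_URL"), api_key=os.getenv("QDRANT_API_KEY"))
encoder = SentenceTransformer('all-MiniLM-L6-v2')  # same encoder as app.py, 384-dim vectors

# Placeholder documents; replace with the real sales knowledge base
docs = [
    "Example document: refund policy text goes here.",
    "Example document: pricing and discount rules go here.",
]

# Recreate the collection the app searches; cosine distance is an assumption
client.recreate_collection(
    collection_name="sales_knowledge",
    vectors_config=VectorParams(size=384, distance=Distance.COSINE),
)

# Store each document under the "text" payload field that get_context reads
client.upsert(
    collection_name="sales_knowledge",
    points=[
        PointStruct(id=i, vector=encoder.encode(text).tolist(), payload={"text": text})
        for i, text in enumerate(docs)
    ],
)

Once the collection is populated, the app can be started locally with Chainlit's CLI: chainlit run app.py -w (the -w flag enables auto-reload on file edits).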