Update chat_handler.py
chat_handler.py CHANGED (+97 −85)
@@ -1,85 +1,97 @@
-from fastapi import Request
-from fastapi.responses import JSONResponse
-import traceback
-import random
-from llm_model import Message, LLMModel
-from intent_api import execute_intent
-from intent_utils import validate_variable_formats
-from parse_llm_blocks import parse_llm_blocks
-from log import log
-
-async def handle_chat(msg: Message, request: Request, app, service_config, session, llm_model: LLMModel):
-    try:
-        user_input = msg.user_input.strip()
-        log(f"💬 User input: '{user_input}'")
-
-        project_name = session.project_name
-        project_config = service_config.get_project_llm_config(project_name)
-        system_prompt = service_config.system_prompt
-
-        # Append the user message to the chat history
-        session.chat_history.append({"role": "user", "content": user_input})
-
-        # === LLM call
-        llm_response = await llm_model.generate_response_with_messages(session.chat_history, project_config, system_prompt)
-        log(f"🤖 LLM response: {llm_response}")
-
-        # === Parse the LLM response
-        parsed = parse_llm_blocks(llm_response)
-        intent = parsed["intent"]
-        params = parsed["params"]
-        missing = parsed["missing"]
-        action_json = parsed["action_json"]
-
-
-
-
-
-
-
-        session.
-        session.
-
-
-
-
-        session.
-
-
-
-
-
-
-
-
-
-
-
-        variable_format_map = intent_definitions.get(intent, {}).get("variable_formats", {})
-        is_valid, validation_errors = validate_variable_formats(session.variables, variable_format_map, data_formats)
-
-        if not is_valid:
-            session.awaiting_variable = list(validation_errors.keys())[0]
-            return {"response": list(validation_errors.values())[0]}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+from fastapi import Request
+from fastapi.responses import JSONResponse
+import traceback
+import random
+from llm_model import Message, LLMModel
+from intent_api import execute_intent
+from intent_utils import validate_variable_formats
+from parse_llm_blocks import parse_llm_blocks
+from log import log
+
+async def handle_chat(msg: Message, request: Request, app, service_config, session, llm_model: LLMModel):
+    try:
+        user_input = msg.user_input.strip()
+        log(f"💬 User input: '{user_input}'")
+
+        project_name = session.project_name
+        project_config = service_config.get_project_llm_config(project_name)
+        system_prompt = service_config.system_prompt
+
+        # Append the user message to the chat history
+        session.chat_history.append({"role": "user", "content": user_input})
+
+        # === LLM call
+        llm_response = await llm_model.generate_response_with_messages(session.chat_history, project_config, system_prompt)
+        log(f"🤖 LLM response: {llm_response}")
+
+        # === Parse the LLM response
+        parsed = parse_llm_blocks(llm_response)
+        intent = parsed["intent"]
+        params = parsed["params"]
+        missing = parsed["missing"]
+        action_json = parsed["action_json"]
+        answer = parsed.get("answer")
+
+        # Append the assistant reply to the chat history
+        session.chat_history.append({"role": "assistant", "content": llm_response})
+
+        # === No intent → return the direct answer
+        if intent == "NONE":
+            session.awaiting_variable = None
+            session.last_intent = None
+            session.variables.clear()
+            return {"response": answer}
+
+        # === Intent detected → update the collected parameters
+        session.last_intent = intent
+        session.variables.update(params)
+
+        # If parameters are missing → ask the user for them (Turkish: "Could you please provide ...?")
+        if missing:
+            session.awaiting_variable = missing[0]
+            return {"response": f"Lütfen {', '.join(missing)} bilgisini belirtir misiniz?"}
+
+        # Validate the collected parameters against the intent's format rules
+        intent_definitions = {i["name"]: i for i in service_config.get_project_intents(project_name)}
+        data_formats = service_config.data_formats
+
+        variable_format_map = intent_definitions.get(intent, {}).get("variable_formats", {})
+        is_valid, validation_errors = validate_variable_formats(session.variables, variable_format_map, data_formats)
+
+        if not is_valid:
+            session.awaiting_variable = list(validation_errors.keys())[0]
+            return {"response": list(validation_errors.values())[0]}
+
+        # === Execute the API call
+        log("🚀 Calling execute_intent()...")
+        result = execute_intent(
+            intent,
+            user_input,
+            session.__dict__,
+            intent_definitions,
+            data_formats,
+            project_name,
+            service_config
+        )
+
+        if "reply" in result:
+            api_reply = result["reply"]
+            # Feed the API response back to the model and ask for a natural-language reply
+            response_prompt = f"intent:{intent} response:{api_reply}"
+            log(f"🤖 Sending API response to the model: {response_prompt}")
+
+            session.chat_history.append({"role": "user", "content": response_prompt})
+            final_response = await llm_model.generate_response_with_messages(session.chat_history, project_config, system_prompt)
+            log(f"🤖 Final natural-language reply: {final_response}")
+
+            session.chat_history.append({"role": "assistant", "content": final_response})
+            return {"response": final_response}
+
+        elif "errors" in result:
+            return {"response": list(result["errors"].values())[0]}
+        else:
+            return {"response": random.choice(project_config["fallback_answers"])}
+
+    except Exception as e:
+        traceback.print_exc()
+        return JSONResponse(content={"error": str(e)}, status_code=500)
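
For reference, handle_chat() consumes the parse_llm_blocks() output as a plain dict. The exact block syntax the model emits is not part of this diff, so the values below are an illustrative sketch; only the keys are taken from the code above.

# Illustrative shape of the dict handle_chat() reads; the values here are hypothetical.
parsed = {
    "intent": "NONE",      # "NONE" or an intent name defined in the project config
    "params": {},          # parameters the model has extracted so far
    "missing": [],         # parameter names the model still needs
    "action_json": None,   # raw action payload (unused on the "NONE" path)
    "answer": "...",       # free-text reply returned when intent == "NONE"
}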
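
The handler also leans on a session object with a specific attribute contract. Below is a minimal sketch of that contract, inferred from the attribute accesses in the new version; the real Session class may carry more state. A plain dataclass like this also satisfies the session.__dict__ hand-off to execute_intent().

from dataclasses import dataclass, field

# Hypothetical reconstruction of the session contract handle_chat() relies on.
@dataclass
class Session:
    project_name: str
    chat_history: list = field(default_factory=list)  # [{"role": ..., "content": ...}]
    variables: dict = field(default_factory=dict)     # collected intent parameters
    awaiting_variable: str | None = None              # parameter currently being requested
    last_intent: str | None = None                    # most recently detected intent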
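
Nothing in this file registers the HTTP route itself. A minimal wiring sketch, assuming Message is a Pydantic model and the app keeps its config, sessions, and model on app.state; the /chat path, the session_id field, and the state attribute names are hypothetical, only handle_chat's signature comes from the diff above.

from fastapi import FastAPI, Request

from chat_handler import handle_chat
from llm_model import Message, LLMModel

app = FastAPI()

@app.post("/chat")  # hypothetical route path
async def chat_endpoint(msg: Message, request: Request):
    # Hypothetical state lookups; the real service may wire these differently.
    service_config = request.app.state.service_config
    session = request.app.state.sessions[msg.session_id]  # assumes Message carries a session id
    llm_model: LLMModel = request.app.state.llm_model
    return await handle_chat(msg, request, request.app, service_config, session, llm_model)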