Update app.py
Browse files
app.py
CHANGED
|
@@ -353,45 +353,55 @@ def build_knowledge_base(
|
|
| 353 |
return status, kb
|
| 354 |
|
| 355 |
|
| 356 |
-
def
|
| 357 |
"""
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
- dict
|
| 363 |
-
- None
|
| 364 |
"""
|
| 365 |
-
if
|
| 366 |
return ""
|
| 367 |
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
text_piece = p.get("text") or ""
|
| 379 |
-
else:
|
| 380 |
-
# object with .text or fallback to str
|
| 381 |
-
text_piece = getattr(p, "text", "") or ""
|
| 382 |
-
if not text_piece:
|
| 383 |
-
text_piece = str(p)
|
| 384 |
-
parts.append(text_piece)
|
| 385 |
-
return "\n".join(parts).strip()
|
| 386 |
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 393 |
|
| 394 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 395 |
|
| 396 |
|
| 397 |
def chat_with_rag(
|
|
@@ -403,7 +413,7 @@ def chat_with_rag(
|
|
| 403 |
):
|
| 404 |
"""
|
| 405 |
history_pairs: list of [user_str, assistant_str] pairs for the UI Chatbot.
|
| 406 |
-
We'll rebuild
|
| 407 |
"""
|
| 408 |
user_message = (user_message or "").strip()
|
| 409 |
api_key = (api_key or "").strip()
|
|
@@ -423,15 +433,20 @@ def chat_with_rag(
|
|
| 423 |
|
| 424 |
client = OpenAI(api_key=api_key)
|
| 425 |
|
| 426 |
-
#
|
| 427 |
-
|
| 428 |
|
| 429 |
combined_system = (
|
| 430 |
DEFAULT_SYSTEM_PROMPT.strip()
|
| 431 |
+ "\n\n---\n\nUser System Instructions:\n"
|
| 432 |
+ system_prompt.strip()
|
| 433 |
)
|
| 434 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 435 |
|
| 436 |
if context:
|
| 437 |
context_block = (
|
|
@@ -440,28 +455,47 @@ def chat_with_rag(
|
|
| 440 |
"If the answer is not supported by the context, say you don’t know.\n\n"
|
| 441 |
f"{context}"
|
| 442 |
)
|
| 443 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 444 |
|
| 445 |
# Rebuild conversation history from pairs (last few turns)
|
| 446 |
recent_pairs = history_pairs[-5:] if history_pairs else []
|
| 447 |
for u, a in recent_pairs:
|
| 448 |
-
|
| 449 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 450 |
|
| 451 |
# Current user message
|
| 452 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 453 |
|
| 454 |
-
# Call OpenAI GPT-5
|
| 455 |
try:
|
| 456 |
-
resp = client.
|
| 457 |
model=CHAT_MODEL,
|
| 458 |
-
|
| 459 |
-
|
| 460 |
)
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
answer = "⚠️ Model returned an empty response. This may be a transient issue with the API."
|
| 465 |
except Exception as e:
|
| 466 |
answer = f"⚠️ OpenAI API error: {e}"
|
| 467 |
|
|
|
|
| 353 |
return status, kb
|
| 354 |
|
| 355 |
|
| 356 |
+
def extract_text_from_response(resp: Any) -> str:
    """Pull the plain-text payload out of a Responses API result.

    Expected shape (loosely enforced):
      resp.output   -> sequence of output items (``resp.data`` as legacy fallback)
      item.content  -> sequence of parts carrying ``.text`` or ``["text"]``

    Returns "" for a missing response. Textual parts are joined with
    newlines; parts with no usable text are stringified as a last resort,
    and a response with no walkable structure degrades to ``str(resp)``.
    """
    if resp is None:
        return ""

    # The Responses API exposes `.output`; some older shapes used `.data`.
    # NOTE: a falsy `.output` (e.g. empty list) also falls through to `.data`.
    items = getattr(resp, "output", None) or getattr(resp, "data", None)
    if items is None:
        # Nothing structured to walk — degrade to the object's string form.
        return str(resp)

    items = items if isinstance(items, (list, tuple)) else [items]

    collected: List[str] = []
    for entry in items:
        parts = getattr(entry, "content", None)
        if parts is None and isinstance(entry, dict):
            parts = entry.get("content")
        if parts is None:
            continue

        if not isinstance(parts, (list, tuple)):
            parts = [parts]

        for piece in parts:
            # Attribute-style part (SDK object with .text).
            attr_text = getattr(piece, "text", None)
            if isinstance(attr_text, str) and attr_text.strip():
                collected.append(attr_text)
            # Dict-style part ({"text": ...}).
            elif (
                isinstance(piece, dict)
                and isinstance(piece.get("text"), str)
                and piece["text"].strip()
            ):
                collected.append(piece["text"])
            # Last resort: stringify whatever this part is.
            else:
                collected.append(str(piece))

    return "\n".join(collected).strip()
|
| 405 |
|
| 406 |
|
| 407 |
def chat_with_rag(
|
|
|
|
| 413 |
):
|
| 414 |
"""
|
| 415 |
history_pairs: list of [user_str, assistant_str] pairs for the UI Chatbot.
|
| 416 |
+
We'll rebuild conversation history for the Responses API each time.
|
| 417 |
"""
|
| 418 |
user_message = (user_message or "").strip()
|
| 419 |
api_key = (api_key or "").strip()
|
|
|
|
| 433 |
|
| 434 |
client = OpenAI(api_key=api_key)
|
| 435 |
|
| 436 |
+
# Build input for Responses API
|
| 437 |
+
input_messages: List[Dict[str, Any]] = []
|
| 438 |
|
| 439 |
combined_system = (
|
| 440 |
DEFAULT_SYSTEM_PROMPT.strip()
|
| 441 |
+ "\n\n---\n\nUser System Instructions:\n"
|
| 442 |
+ system_prompt.strip()
|
| 443 |
)
|
| 444 |
+
input_messages.append(
|
| 445 |
+
{
|
| 446 |
+
"role": "system",
|
| 447 |
+
"content": [{"type": "input_text", "text": combined_system}],
|
| 448 |
+
}
|
| 449 |
+
)
|
| 450 |
|
| 451 |
if context:
|
| 452 |
context_block = (
|
|
|
|
| 455 |
"If the answer is not supported by the context, say you don’t know.\n\n"
|
| 456 |
f"{context}"
|
| 457 |
)
|
| 458 |
+
input_messages.append(
|
| 459 |
+
{
|
| 460 |
+
"role": "system",
|
| 461 |
+
"content": [{"type": "input_text", "text": context_block}],
|
| 462 |
+
}
|
| 463 |
+
)
|
| 464 |
|
| 465 |
# Rebuild conversation history from pairs (last few turns)
|
| 466 |
recent_pairs = history_pairs[-5:] if history_pairs else []
|
| 467 |
for u, a in recent_pairs:
|
| 468 |
+
input_messages.append(
|
| 469 |
+
{
|
| 470 |
+
"role": "user",
|
| 471 |
+
"content": [{"type": "input_text", "text": u}],
|
| 472 |
+
}
|
| 473 |
+
)
|
| 474 |
+
input_messages.append(
|
| 475 |
+
{
|
| 476 |
+
"role": "assistant",
|
| 477 |
+
"content": [{"type": "output_text", "text": a}],
|
| 478 |
+
}
|
| 479 |
+
)
|
| 480 |
|
| 481 |
# Current user message
|
| 482 |
+
input_messages.append(
|
| 483 |
+
{
|
| 484 |
+
"role": "user",
|
| 485 |
+
"content": [{"type": "input_text", "text": user_message}],
|
| 486 |
+
}
|
| 487 |
+
)
|
| 488 |
|
| 489 |
+
# Call OpenAI GPT-5 via Responses API
|
| 490 |
try:
|
| 491 |
+
resp = client.responses.create(
|
| 492 |
model=CHAT_MODEL,
|
| 493 |
+
input=input_messages,
|
| 494 |
+
# no temperature, no token params -> avoid unsupported parameter errors
|
| 495 |
)
|
| 496 |
+
answer = extract_text_from_response(resp)
|
| 497 |
+
if not answer.strip():
|
| 498 |
+
answer = "⚠️ Model returned an empty response object. This may be an API issue."
|
|
|
|
| 499 |
except Exception as e:
|
| 500 |
answer = f"⚠️ OpenAI API error: {e}"
|
| 501 |
|