Update app.py
Browse files
app.py
CHANGED
|
@@ -461,17 +461,22 @@ async def tool_library_info(question, history=None, model="gpt"):
|
|
| 461 |
prompt = f"""You are the Khalifa University Library AI Assistant in Abu Dhabi, UAE.
|
| 462 |
KU means Khalifa University, NOT Kuwait University.
|
| 463 |
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 468 |
|
| 469 |
Context:
|
| 470 |
{context}
|
| 471 |
|
| 472 |
Question: {question}
|
| 473 |
|
| 474 |
-
Answer
|
| 475 |
|
| 476 |
# Respect the model selection — use Claude if requested, GPT otherwise
|
| 477 |
use_claude = model == "claude" and os.environ.get("ANTHROPIC_API_KEY")
|
|
@@ -490,14 +495,16 @@ Answer (either answer from context, or output NO_LIBRARY_ANSWER):"""
|
|
| 490 |
|
| 491 |
# Catch cases where LLM ignored the NO_LIBRARY_ANSWER instruction
|
| 492 |
# and generated a polite "I don't have info" response instead
|
|
|
|
|
|
|
|
|
|
| 493 |
no_info_patterns = [
|
| 494 |
-
"i don't have
|
| 495 |
-
"i
|
| 496 |
-
"i
|
| 497 |
-
"i
|
| 498 |
-
"i
|
| 499 |
-
"i
|
| 500 |
-
"i apologize, but i don't",
|
| 501 |
"no specific information",
|
| 502 |
"not available in",
|
| 503 |
"not in the context",
|
|
@@ -505,10 +512,22 @@ Answer (either answer from context, or output NO_LIBRARY_ANSWER):"""
|
|
| 505 |
"context doesn't contain",
|
| 506 |
"i cannot find",
|
| 507 |
"i could not find",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 508 |
]
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
print(f"RAG answer matched no-info pattern, returning has_answer=False")
|
| 512 |
return {"answer": "", "sources": [], "has_answer": False}
|
| 513 |
|
| 514 |
sources = []
|
|
|
|
| 461 |
prompt = f"""You are the Khalifa University Library AI Assistant in Abu Dhabi, UAE.
|
| 462 |
KU means Khalifa University, NOT Kuwait University.
|
| 463 |
|
| 464 |
+
RULES — follow exactly:
|
| 465 |
+
1. Answer ONLY using the context provided below.
|
| 466 |
+
2. If the context has relevant information → answer in 2-4 sentences, include URLs if present.{did_you_mean_instruction}
|
| 467 |
+
3. If the context does NOT contain the answer → output the single word: NO_LIBRARY_ANSWER
|
| 468 |
+
- Do NOT write "I'm sorry"
|
| 469 |
+
- Do NOT write "I don't have information"
|
| 470 |
+
- Do NOT suggest contacting the library
|
| 471 |
+
- Do NOT apologise
|
| 472 |
+
- ONLY output: NO_LIBRARY_ANSWER
|
| 473 |
|
| 474 |
Context:
|
| 475 |
{context}
|
| 476 |
|
| 477 |
Question: {question}
|
| 478 |
|
| 479 |
+
Answer:"""
|
| 480 |
|
| 481 |
# Respect the model selection — use Claude if requested, GPT otherwise
|
| 482 |
use_claude = model == "claude" and os.environ.get("ANTHROPIC_API_KEY")
|
|
|
|
| 495 |
|
| 496 |
# Catch cases where LLM ignored the NO_LIBRARY_ANSWER instruction
|
| 497 |
# and generated a polite "I don't have info" response instead
|
| 498 |
+
# Normalise smart/curly apostrophes to straight before pattern matching
|
| 499 |
+
answer_normalised = answer.lower().replace("’", "'").replace("‘", "'").replace("“", '"').replace("”", '"')
|
| 500 |
+
|
| 501 |
no_info_patterns = [
|
| 502 |
+
"i don't have",
|
| 503 |
+
"i do not have",
|
| 504 |
+
"i'm sorry",
|
| 505 |
+
"i am sorry",
|
| 506 |
+
"i apologize",
|
| 507 |
+
"i apologise",
|
|
|
|
| 508 |
"no specific information",
|
| 509 |
"not available in",
|
| 510 |
"not in the context",
|
|
|
|
| 512 |
"context doesn't contain",
|
| 513 |
"i cannot find",
|
| 514 |
"i could not find",
|
| 515 |
+
"don't have information",
|
| 516 |
+
"do not have information",
|
| 517 |
+
"unable to find",
|
| 518 |
+
"unable to provide",
|
| 519 |
+
"not found in",
|
| 520 |
+
"no information",
|
| 521 |
+
"for further inquiries",
|
| 522 |
+
"for more information, please contact",
|
| 523 |
+
"please visit ask",
|
| 524 |
+
"please contact the library",
|
| 525 |
+
"recommend checking",
|
| 526 |
+
"i'd suggest",
|
| 527 |
+
"i would suggest contacting",
|
| 528 |
]
|
| 529 |
+
if any(p in answer_normalised for p in no_info_patterns):
|
| 530 |
+
print(f"RAG no-info pattern matched: '{answer_normalised[:80]}'")
|
|
|
|
| 531 |
return {"answer": "", "sources": [], "has_answer": False}
|
| 532 |
|
| 533 |
sources = []
|