Update app.py
app.py CHANGED
@@ -65,10 +65,17 @@ llm = HuggingFacePipeline(pipeline=pipe)
 # --- Step 4: Setup memory and QA chain ---
 memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
-prompt = PromptTemplate.from_template(
-
-
-
+prompt = PromptTemplate.from_template("""
+You are a helpful assistant at the University of Hertfordshire.
+Use the provided context to create a **clear, concise, step-by-step answer** in friendly, student-friendly language.
+Do not copy the context verbatim—paraphrase where possible.
+Remove any irrelevant details.
+If the answer is not in the context, reply: "I don't know."
+
+Format the answer like this:
+1) Step one...
+2) Step two...
+(Use numbered steps where possible.)
 
 Context:
 {context}
@@ -76,9 +83,19 @@ Context:
 Question:
 {question}
 
-
-""")
+Final refined answer:
+""".strip()
+)
 
+def refine_answer(raw_answer):
+    # Remove extra markers
+    text = raw_answer.strip()
+    text = text.replace("Helpful answer:", "").strip()
+    # Capitalise first letter if missing
+    if text and not text[0].isupper():
+        text = text[0].upper() + text[1:]
+    return text
+
 qa_chain = ConversationalRetrievalChain.from_llm(
     llm=llm,
     retriever=vectordb.as_retriever(search_kwargs={"k": 3}),
@@ -90,11 +107,21 @@ qa_chain = ConversationalRetrievalChain.from_llm(
 UH_LOGO = "images/UH.png"
 
 # --- Step 5: Define chatbot logic ---
+
+def refine_answer(raw_answer: str) -> str:
+    """Clean and polish raw model output."""
+    text = raw_answer.strip()
+    # Remove prompt artifacts
+    for marker in ["Helpful answer:", "<|assistant|>", "Refined helpful answer:"]:
+        text = text.replace(marker, "")
+    # Normalise spaces
+    text = " ".join(text.split())
+    return text
+
 def chat(message, history):
     result = qa_chain.invoke({"question": message})
-
-
-    response = response.split("Answer: ")[-1].replace("<|assistant|>", "")
+    response = refine_answer(result.get("answer", ""))
+
     # keep GPU clean between turns (helps on Spaces)
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
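
Note that the commit adds refine_answer twice: a plain version next to the prompt setup in Step 4 and a type-hinted version under Step 5. Both sit at module level, so the later definition shadows the earlier one and is the one chat actually calls. A quick standalone check of what that version does to a raw model turn (the sample string below is made up; the markers are the ones listed in the diff):

def refine_answer(raw_answer: str) -> str:
    """Strip prompt artifacts and collapse repeated whitespace, as in the diff."""
    text = raw_answer.strip()
    for marker in ["Helpful answer:", "<|assistant|>", "Refined helpful answer:"]:
        text = text.replace(marker, "")
    return " ".join(text.split())

# Hypothetical raw output; real model output will differ.
print(refine_answer("Helpful answer: <|assistant|>  1) Log in to the portal.   2) Open your module page."))
# -> 1) Log in to the portal. 2) Open your module page.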
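The chat(message, history) signature follows Gradio's ChatInterface callback convention (history is supplied by the UI and unused here), so the rest of app.py presumably wires it up along these lines. This is a hypothetical sketch, not code from this commit, and the title string is invented:

import gradio as gr

# Hypothetical wiring; this part of app.py lies outside the diff.
demo = gr.ChatInterface(
    fn=chat,  # assumed to return the refined answer string
    title="University of Hertfordshire Assistant",  # invented title
)

if __name__ == "__main__":
    demo.launch()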