Commit
·
2fbc411
1
Parent(s):
bdf89c3
Implement custom prompt and message formatting for FAQ chatbot in main.py to enhance response accuracy and professionalism.
Browse files
main.py
CHANGED
|
@@ -72,6 +72,23 @@ _ = vector_store.add_documents(documents=docs)
|
|
| 72 |
|
| 73 |
prompt = hub.pull("rlm/rag-prompt")
|
| 74 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
class State(TypedDict):
|
| 76 |
question: str
|
| 77 |
context: List[Document]
|
|
@@ -83,7 +100,13 @@ def retrieve(state: State):
|
|
| 83 |
|
| 84 |
def generate(state: State):
|
| 85 |
docs_content = "\n\n".join(doc.page_content for doc in state["context"])
|
| 86 |
-
messages =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
print(messages)
|
| 88 |
response = llm.invoke(messages)
|
| 89 |
return {"answer": response.content}
|
|
|
|
# NOTE(review): this hub prompt appears superseded by the custom
# system_message / human_message_template added in this commit —
# generate() no longer references `prompt` in the visible diff.
# Confirm no other caller uses it before deleting this line.
prompt = hub.pull("rlm/rag-prompt")
# Custom prompt replacing the hub "rlm/rag-prompt" template: fixed
# persona plus tone/format rules tailored to the MLSC Coherence 25
# Hackathon FAQ bot. Sent once per request as the system message.
system_message = """You are a helpful and professional FAQ chatbot for the MLSC Coherence 25 Hackathon. Your role is to:
1. Provide accurate and concise answers based on the provided context
2. Be friendly but professional in tone
3. If you don't know the answer, simply say "I don't have information about that"
4. Keep responses brief and to the point
5. Focus on providing factual information from the context
6. Never mention "the provided context" or similar phrases in your responses
7. Never explain why you don't know something - just state that you don't know
8. Be direct and avoid unnecessary explanations"""

# Per-request human-message template; generate() fills {context} with the
# joined retrieved documents and {question} with the user's query via
# str.format().
human_message_template = """Context: {context}

Question: {question}

Please provide a clear and concise answer based on the context above."""
class State(TypedDict):
|
| 93 |
question: str
|
| 94 |
context: List[Document]
|
|
|
|
def generate(state: State):
    """Answer state["question"] using the retrieved documents in state["context"].

    Joins the page content of every retrieved Document into one context
    string, builds a [SystemMessage, HumanMessage] pair from the
    module-level prompt templates, and invokes the LLM.

    Args:
        state: Graph state carrying the user question and the documents
            returned by the retrieve step.

    Returns:
        dict with a single "answer" key holding the model's reply text,
        merged back into the graph State by the framework.
    """
    # Double newline keeps retrieved documents visually separated for the model.
    docs_content = "\n\n".join(doc.page_content for doc in state["context"])
    messages = [
        SystemMessage(content=system_message),
        HumanMessage(content=human_message_template.format(
            context=docs_content,
            question=state["question"],
        )),
    ]
    # Fix: removed leftover debug `print(messages)` — it dumped the full
    # prompt (including all retrieved context) to stdout on every request.
    response = llm.invoke(messages)
    return {"answer": response.content}