Commit
·
6006bfe
1
Parent(s):
de4c56a
Update chat model initialization to "deepseek-r1-distill-llama-70b" and refactor prompt creation using ChatPromptTemplate for improved FAQ chatbot responses in main.py.
Browse files
main.py
CHANGED
|
@@ -14,6 +14,7 @@ from pydantic.main import BaseModel
|
|
| 14 |
from typing_extensions import List, TypedDict
|
| 15 |
|
| 16 |
from langchain_cohere import CohereEmbeddings
|
|
|
|
| 17 |
|
| 18 |
import re
|
| 19 |
# from dotenv import load_dotenv
|
|
@@ -31,7 +32,7 @@ if not os.environ.get("GROQ_API_KEY"):
|
|
| 31 |
# print(f"GROQ_API_KEY: {os.getenv('GROQ_API_KEY')}")
|
| 32 |
# print(f"HUGGING_FACE_API_KEY: {os.getenv('HUGGING_FACE_API_KEY')}")
|
| 33 |
|
| 34 |
-
llm = init_chat_model("
|
| 35 |
'''
|
| 36 |
embeddings = HuggingFaceInferenceAPIEmbeddings(
|
| 37 |
api_key = os.getenv('HUGGING_FACE_API_KEY'),
|
|
@@ -70,7 +71,14 @@ docs = [Document(page_content=text.page_content, metadata=text.metadata) for tex
|
|
| 70 |
_ = vector_store.add_documents(documents=docs)
|
| 71 |
|
| 72 |
|
| 73 |
-
prompt =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
|
| 75 |
class State(TypedDict):
|
| 76 |
question: str
|
|
@@ -83,8 +91,10 @@ def retrieve(state: State):
|
|
| 83 |
|
| 84 |
def generate(state: State):
|
| 85 |
docs_content = "\n\n".join(doc.page_content for doc in state["context"])
|
| 86 |
-
messages = prompt.
|
| 87 |
-
|
|
|
|
|
|
|
| 88 |
response = llm.invoke(messages)
|
| 89 |
return {"answer": response.content}
|
| 90 |
|
|
|
|
| 14 |
from typing_extensions import List, TypedDict
|
| 15 |
|
| 16 |
from langchain_cohere import CohereEmbeddings
|
| 17 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
| 18 |
|
| 19 |
import re
|
| 20 |
# from dotenv import load_dotenv
|
|
|
|
| 32 |
# print(f"GROQ_API_KEY: {os.getenv('GROQ_API_KEY')}")
|
| 33 |
# print(f"HUGGING_FACE_API_KEY: {os.getenv('HUGGING_FACE_API_KEY')}")
|
| 34 |
|
| 35 |
+
# Chat model served through Groq. Requires GROQ_API_KEY in the environment
# (the file checks os.environ.get("GROQ_API_KEY") earlier, per the hunk header).
llm = init_chat_model("deepseek-r1-distill-llama-70b", model_provider="groq", api_key=os.environ["GROQ_API_KEY"])
|
| 36 |
'''
|
| 37 |
embeddings = HuggingFaceInferenceAPIEmbeddings(
|
| 38 |
api_key = os.getenv('HUGGING_FACE_API_KEY'),
|
|
|
|
| 71 |
_ = vector_store.add_documents(documents=docs)
|
| 72 |
|
| 73 |
|
| 74 |
# Prompt for the FAQ chatbot: system instructions with the retrieved context,
# followed by the user's question. Both "context" and "question" are template
# variables filled in by generate() via prompt.format_messages(...).
#
# NOTE(review): the committed version built this from SystemMessage/HumanMessage
# objects (never imported here) plus MessagesPlaceholder("context"). That breaks
# in two ways: MessagesPlaceholder requires a *list of messages*, but generate()
# passes a joined string, and a literal HumanMessage(content="{question}") is a
# concrete message, so "{question}" is never substituted. Role/template-string
# tuples fix both while keeping the same variable names.
prompt = ChatPromptTemplate.from_messages([
    (
        "system",
        "You are a helpful FAQ chatbot assistant for the Coherence 2025 Hackathon.\n"
        "Use the provided context to answer questions accurately and concisely.\n"
        "If the answer cannot be found in the context, say so clearly.\n"
        "Keep your responses friendly and professional.\n\n"
        "Context:\n{context}",
    ),
    ("human", "{question}"),
])
|
| 82 |
|
| 83 |
class State(TypedDict):
|
| 84 |
question: str
|
|
|
|
| 91 |
|
| 92 |
def generate(state: State):
    """Produce an answer to state["question"] from the retrieved documents.

    Joins the page_content of every document in state["context"] into one
    string, renders the chat prompt with that context and the question,
    invokes the LLM, and returns the answer text under the "answer" key.
    """
    context_text = "\n\n".join(doc.page_content for doc in state["context"])
    chat_messages = prompt.format_messages(
        context=context_text,
        question=state["question"],
    )
    llm_reply = llm.invoke(chat_messages)
    return {"answer": llm_reply.content}
|
| 100 |
|