# vietqa-api/src/nodes/direct.py
# (Header metadata from commit 55f1010 by quanho114:
#  "Add chat mode support - natural responses without MCQ format")
"""Direct Answer node for reading comprehension or general questions without RAG."""
from langchain_core.prompts import ChatPromptTemplate
from src.data_processing.answer import extract_answer
from src.data_processing.formatting import format_choices
from src.state import GraphState
from src.utils.llm import get_large_model
from src.utils.logging import print_log
from src.utils.prompts import load_prompt
def direct_answer_node(state: GraphState) -> dict:
    """Answer questions directly using the Large Model, skipping retrieval.

    Used for reading-comprehension or general-knowledge questions where RAG
    context is unnecessary. Supports two modes, selected by the presence of
    answer choices in ``state``:

    - MCQ mode (``all_choices`` non-empty): the model's reasoning is parsed
      with :func:`extract_answer` to produce a single choice letter.
    - Chat mode (``all_choices`` empty): the raw model response is returned
      as-is, with an empty ``answer``.

    Args:
        state: Graph state; reads ``question`` and ``all_choices``.

    Returns:
        dict with keys ``answer`` (extracted choice letter, or ``""`` in chat
        mode) and ``raw_response`` (the model's full text output).
    """
    print_log(" [Direct] Processing Reading Comprehension/General Question...")
    all_choices = state["all_choices"]
    choices_text = format_choices(all_choices)

    llm = get_large_model()
    system_prompt = load_prompt("direct_answer.j2", "system", choices=choices_text)
    user_prompt = load_prompt("direct_answer.j2", "user", question=state["question"], choices=choices_text)

    # The rendered prompts may contain literal braces (e.g. from the question
    # text or choices); escape them so ChatPromptTemplate does not treat them
    # as template variables.
    system_prompt = system_prompt.replace("{", "{{").replace("}", "}}")
    user_prompt = user_prompt.replace("{", "{{").replace("}", "}}")

    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", user_prompt),
    ])
    chain = prompt | llm
    # All variables were escaped above, so the template takes no inputs.
    response = chain.invoke({})
    content = response.content.strip()
    print_log(f" [Direct] Reasoning: {content}...")

    # Chat mode: no choices were provided, so return the raw response
    # without attempting MCQ answer extraction.
    if not all_choices:
        print_log(" [Direct] Chat mode - returning natural response")
        return {"answer": "", "raw_response": content}

    # MCQ mode: all_choices is guaranteed non-empty here, so pass its real
    # length (the previous `len(all_choices) or 4` fallback was unreachable
    # dead code — the chat-mode guard above already returned when empty).
    answer = extract_answer(content, num_choices=len(all_choices))
    print_log(f" [Direct] Final Answer: {answer}")
    return {"answer": answer, "raw_response": content}