File size: 1,852 Bytes
ebb8326
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55f1010
ebb8326
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55f1010
 
 
 
 
ebb8326
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
"""Direct Answer node for reading comprehension or general questions without RAG."""

from langchain_core.prompts import ChatPromptTemplate

from src.data_processing.answer import extract_answer
from src.data_processing.formatting import format_choices
from src.state import GraphState
from src.utils.llm import get_large_model
from src.utils.logging import print_log
from src.utils.prompts import load_prompt


def direct_answer_node(state: GraphState) -> dict:
    """Answer a question directly with the large model, skipping retrieval.

    Used for reading-comprehension or general-knowledge questions where
    RAG context is unnecessary.

    Args:
        state: Graph state; reads the "question" and "all_choices" keys.

    Returns:
        dict with two keys:
            "answer": the extracted answer choice, or "" in chat mode
                (when no choices are supplied).
            "raw_response": the model's full stripped text response.
    """
    print_log("        [Direct] Processing Reading Comprehension/General Question...")

    all_choices = state["all_choices"]
    choices_text = format_choices(all_choices)

    llm = get_large_model()

    system_prompt = load_prompt("direct_answer.j2", "system", choices=choices_text)
    user_prompt = load_prompt("direct_answer.j2", "user", question=state["question"], choices=choices_text)

    # Escape curly braces so LangChain's f-string template parser does not
    # treat literal braces in the prompt text (e.g. JSON examples) as
    # input variables.
    system_prompt = system_prompt.replace("{", "{{").replace("}", "}}")
    user_prompt = user_prompt.replace("{", "{{").replace("}", "}}")

    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        ("human", user_prompt),
    ])

    chain = prompt | llm
    # All content is baked into the template above, so invoke with no variables.
    response = chain.invoke({})

    content = response.content.strip()
    print_log(f"        [Direct] Reasoning: {content}...")

    # Chat mode: no answer choices were supplied, so return the raw natural
    # response without attempting choice extraction.
    if not all_choices:
        print_log("        [Direct] Chat mode - returning natural response")
        return {"answer": "", "raw_response": content}

    # all_choices is guaranteed non-empty here (the chat-mode branch above
    # returned otherwise), so len() >= 1 — the previous `or 4` fallback was
    # unreachable and has been removed.
    answer = extract_answer(content, num_choices=len(all_choices))
    print_log(f"        [Direct] Final Answer: {answer}")
    return {"answer": answer, "raw_response": content}