File size: 7,190 Bytes
45e9462
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
from data import debug_print,llm1
from langchain_core.messages import HumanMessage
from pydantic_schema import IntentClassification,RouterResult
import pprint
from langgraph.graph import add_messages
from langgraph.managed import IsLastStep
from typing import TypedDict, List, Dict, Any, Sequence,Optional
from typing_extensions import Annotated
from pydantic_schema import IntentClassification

# LangGraph State
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, BaseMessage
class CreditCardState(TypedDict):
    """Shared LangGraph state for the credit-card workflow.

    Nodes read the keys they need and return partial updates (e.g.
    ``{"intent": ...}``) which LangGraph merges back into this state.
    """

    # Query inputs
    raw_query: str  # presumably the unmodified user input — TODO confirm against caller
    query: str  # query text read by the classifier/handler nodes in this module
    preferences: str
    multi_queries: List[str]
    excluded_cards: List[str]
    # Routing flags / intent
    intent: IntentClassification  # set by intent_classifier_node; plain str "out-of-scope" on failure
    query_intent: bool
    include_cobranded: bool
    use_eligibility: bool
    # Eligibility / fee filter inputs — presumably applied when use_eligibility is True; verify in filter node
    age: int
    income: float
    cibil: int  # NOTE(review): looks like a CIBIL credit score — confirm
    min_joining_fee: float
    max_joining_fee: float
    min_annual_fee: float
    max_annual_fee: float
    # Retrieval / ranking outputs
    cards: List[str]
    card_links: List[str]
    ranked_cards: List[Dict[str, Any]]
    card_names: List[str]
    card_lookup: Dict[str, str]
    top_card: str
    top_card_description: List[str]  # handler nodes store a single-element list of response text here
    card_rows: List[List[str]]
    # Comparison / chat flow
    trigger_compare: bool 
    trigger_chat: bool
    user_message: str   
    chat_history: List[Any]  # element type not evident from this file — TODO confirm
    selected_cards: List[str]
    comparison_result: str

    # LangGraph-managed channels
    messages: Annotated[Sequence[BaseMessage], add_messages]  # reducer appends rather than replaces
    is_last_step: IsLastStep
    neo4j_error: bool
    router_decision: Optional["RouterResult"]
    new_card_info: Optional[str]

def get_pretty_state_string(state: "CreditCardState | dict") -> str:
    """Render *state* as a human-readable, pretty-printed string.

    Args:
        state: The state mapping to format.

    Returns:
        The ``pprint``-formatted representation of ``state``.
    """
    formatted = pprint.pformat(state, indent=2)
    return formatted

# LangGraph Nodes

async def intent_classifier_node(state: CreditCardState): 
    """Classify the user's query into one of three intents.

    Intents:
        - 'credit-card-recommendation'
        - 'general-credit-related'
        - 'out-of-scope'

    Uses the LLaMA model (via vLLM) with a minimal prompt; decoding is
    constrained to the ``IntentClassification`` JSON schema through vLLM's
    ``guided_json`` extra body, and the raw JSON is then validated.

    Args:
        state: Current graph state; only ``state["query"]`` is read.

    Returns:
        Partial state update ``{"intent": ...}``. On any failure the intent
        falls back to the plain string ``"out-of-scope"``.
    """
    
    debug_print("NODE", f"Entered intent_classifier_node with state:\n{get_pretty_state_string(state)}\n")

    query = state["query"]

    system_prompt = SystemMessage(content="""You are an AI system that classifies a user's query into one of three categories. Your response MUST be a JSON object with a single key named "intent".



        **Classification Rules:**



        1.  **credit-card-recommendation:** Use for queries seeking a specific card recommendation based on needs, lifestyle, or rewards.

        2.  **Also `credit-card-recommendation`:** Use for queries that are just a list of relevant keywords like "cashback", "travel", or "food, travel, cashback".

        3.  **general-credit-related:** Use for queries asking for general information ONLY about **credit cards or financial terms** (e.g., "what is APR?").

        4.  **out-of-scope:** Use for ANY query that is not about credit cards, or is inappropriate, harmful, or nonsensical.



        **--- EXAMPLES ---**

        - "Find me a good cashback card" -> {"intent": "credit-card-recommendation"}

        - "cashback, food, travel" -> {"intent": "credit-card-recommendation"}

        - "what is a balance transfer?" -> {"intent": "general-credit-related"}

        - "what is the weather?" -> {"intent": "out-of-scope"}

        """)
    
    intent_prompt_messages = [system_prompt, HumanMessage(content=f"Classify this user query: '{query}'")]

    try:
        json_schema = IntentClassification.model_json_schema()

        print("Calling the llama model for structured intent classification...")
        
        # vLLM-specific: "guided_json" constrains token generation to this schema,
        # so the model can only emit JSON matching IntentClassification.
        response_obj = await llm1.ainvoke(
            intent_prompt_messages,
            extra_body={
                "guided_json": json_schema,
                "max_tokens": 30,  # the JSON object is tiny; keep generation short
                "temperature": 0.0  # deterministic classification
            }
        )
        print("Response generated.")
        
        debug_print("INTENT_RAW_JSON", response_obj.content)
        
        # Validate the raw JSON against the pydantic schema before trusting it.
        structured_response = IntentClassification.model_validate_json(response_obj.content)
        intent = structured_response.intent
        
        debug_print("INTENT", f"Final classification: {intent}")

    except Exception as e:
        # Broad catch is deliberate: any LLM/validation failure degrades to the
        # safe "out-of-scope" route instead of crashing the graph.
        debug_print("ERROR", f"Intent classification failed: {e}")
        intent = "out-of-scope"

    return {"intent": intent}
    
async def general_info_handler_node(state: CreditCardState): 
    """Answer a general, educational credit-card question.

    Invokes the LLaMA model (via vLLM) with an instruction to stay generic
    (no product recommendations), strips any echoed "Answer:" prefix, and
    returns the text in the same partial-state shape as the other handlers.
    """

    debug_print("NODE", f"Entered general_info_handler_node with state:\n {get_pretty_state_string(state)}\n")

    user_query = state["query"]

    # Same literal instruction text as before, split for readability.
    assistant_instructions = (
        "You are a helpful and knowledgeable financial assistant. "
        "Your task is to answer the user's general question about credit cards or related financial topics.\n"
        "\n"
        "- Provide a clear, accurate, and concise answer.\n"
        "- Do not recommend any specific credit card products. Keep the response general and educational.\n"
        "- Structure your answer for readability.\n"
    )
    prompt_messages = [
        SystemMessage(content=assistant_instructions),
        HumanMessage(content=f"Answer:\nUser Query: {user_query}\nAnswer:"),
    ]

    reply = await llm1.ainvoke(
        prompt_messages,
        config={"max_tokens": 200, "temperature": 0.7},
    )

    # Keep only the text after the last "Answer:" marker if the model echoed it.
    answer_text = reply.content
    marker = "Answer:"
    if marker in answer_text:
        answer_text = answer_text.rsplit(marker, 1)[-1].strip()

    return {
        "top_card": "",
        "top_card_description": [answer_text],
        "card_rows": [],
        "card_names": [],
        "card_lookup": {},
    }

async def oos_handler_node(state: CreditCardState): 
    """Politely decline an out-of-scope query.

    Asks the LLaMA model (via vLLM) for a friendly refusal explaining that
    only credit-card-recommendation queries are supported, then returns the
    refusal text in the same partial-state shape as the other handlers.
    """

    debug_print("NODE", f"Entered oos_handler_node with state:\n {get_pretty_state_string(state)}\n")

    user_query = state["query"]

    # Same literal instruction text as before, split for readability.
    refusal_instructions = (
        "You are a specialized assistant for a credit card recommendation service.\n"
        "Your ONLY function is to politely decline out-of-scope questions.\n"
        "\n"
        "- Do NOT answer the user’s question.\n"
        "- Do NOT engage in unrelated conversation.\n"
        "- Simply explain that your scope is limited to credit card recommendations.\n"
    )
    prompt_messages = [
        SystemMessage(content=refusal_instructions),
        HumanMessage(content=f"Assistant:\nUser Query: {user_query}\nAssistant:"),
    ]

    reply = await llm1.ainvoke(
        prompt_messages,
        config={"max_tokens": 80, "temperature": 0.7},
    )

    # Keep only the text after the last "Assistant:" marker if the model echoed it.
    refusal_text = reply.content
    marker = "Assistant:"
    if marker in refusal_text:
        refusal_text = refusal_text.rsplit(marker, 1)[-1].strip()

    return {
        "top_card": "",
        "top_card_description": [refusal_text],
        "card_rows": [],
        "card_names": [],
        "card_lookup": {},
    }