# Source: uploaded by Sulaiman8 ("Upload all the files", commit 45e9462, verified)
from data import debug_print,llm1
from langchain_core.messages import HumanMessage
from pydantic_schema import IntentClassification,RouterResult
import pprint
from langgraph.graph import add_messages
from langgraph.managed import IsLastStep
from typing import TypedDict, List, Dict, Any, Sequence,Optional
from typing_extensions import Annotated
from pydantic_schema import IntentClassification
# LangGraph State
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, BaseMessage
class CreditCardState(TypedDict):
    """State dictionary flowing through the credit-card LangGraph workflow.

    Populated incrementally by the graph nodes (intent classification,
    recommendation/ranking, comparison, chat); only a subset of keys is
    meaningful at any given node.
    """
    # --- query & user preferences ---
    raw_query: str                   # original user query, before any rewriting
    query: str                       # query text the nodes actually read (state["query"])
    preferences: str                 # free-text user preferences — TODO confirm producer
    multi_queries: List[str]         # alternative phrasings of the query — TODO confirm producer
    excluded_cards: List[str]        # cards to leave out of results — TODO confirm
    # --- intent routing ---
    # NOTE(review): intent_classifier_node returns {"intent": structured_response.intent}
    # (the intent value, typically a string), not the whole IntentClassification
    # model — this annotation may be looser than runtime contents; confirm.
    intent: IntentClassification
    query_intent: bool
    include_cobranded: bool          # whether co-branded cards may be included — TODO confirm
    use_eligibility: bool            # whether to apply the eligibility filters below — TODO confirm
    # --- eligibility / fee filters ---
    age: int
    income: float
    cibil: int                       # presumably the CIBIL credit score — confirm
    min_joining_fee: float
    max_joining_fee: float
    min_annual_fee: float
    max_annual_fee: float
    # --- retrieval & ranking results ---
    cards: List[str]
    card_links: List[str]
    ranked_cards: List[Dict[str, Any]]
    card_names: List[str]            # cleared to [] by the general-info/OOS handlers
    card_lookup: Dict[str, str]      # cleared to {} by the general-info/OOS handlers
    top_card: str                    # "" when the reply is informational (no recommendation)
    top_card_description: List[str]  # handler nodes place the LLM reply text here
    card_rows: List[List[str]]       # tabular card data; cleared by the handlers
    # --- comparison & chat ---
    trigger_compare: bool
    trigger_chat: bool
    user_message: str
    chat_history: List[Any]          # presumably chat messages; element type not evident here
    selected_cards: List[str]
    comparison_result: str
    # --- LangGraph plumbing ---
    messages: Annotated[Sequence[BaseMessage], add_messages]  # add_messages reducer merges message lists
    is_last_step: IsLastStep         # LangGraph managed value
    neo4j_error: bool                # presumably set when a Neo4j lookup fails — confirm
    router_decision: Optional["RouterResult"]
    new_card_info: Optional[str]
def get_pretty_state_string(state: CreditCardState | dict) -> str:
    """Render *state* as a human-readable, pretty-printed string.

    Args:
        state: The state mapping to format.

    Returns:
        The pprint-formatted representation (indent=2).
    """
    printer = pprint.PrettyPrinter(indent=2)
    return printer.pformat(state)
# LangGraph Nodes
async def intent_classifier_node(state: CreditCardState):
    """
    Classifies the user query into one of three intents:
    - 'credit-card-recommendation'
    - 'general-credit-related'
    - 'out-of-scope'
    Uses LLaMA model (via vLLM) to do classification with minimal prompt + output post-processing.

    Args:
        state: Graph state; only ``state["query"]`` is read.

    Returns:
        A partial-state dict with a single ``"intent"`` key. Falls back to
        ``"out-of-scope"`` if the model call or JSON validation fails.
    """
    debug_print("NODE", f"Entered intent_classifier_node with state:\n{get_pretty_state_string(state)}\n")
    query = state["query"]
    system_prompt = SystemMessage(content="""You are an AI system that classifies a user's query into one of three categories. Your response MUST be a JSON object with a single key named "intent".
**Classification Rules:**
1. **credit-card-recommendation:** Use for queries seeking a specific card recommendation based on needs, lifestyle, or rewards.
2. **Also `credit-card-recommendation`:** Use for queries that are just a list of relevant keywords like "cashback", "travel", or "food, travel, cashback".
3. **general-credit-related:** Use for queries asking for general information ONLY about **credit cards or financial terms** (e.g., "what is APR?").
4. **out-of-scope:** Use for ANY query that is not about credit cards, or is inappropriate, harmful, or nonsensical.
**--- EXAMPLES ---**
- "Find me a good cashback card" -> {"intent": "credit-card-recommendation"}
- "cashback, food, travel" -> {"intent": "credit-card-recommendation"}
- "what is a balance transfer?" -> {"intent": "general-credit-related"}
- "what is the weather?" -> {"intent": "out-of-scope"}
""")
    intent_prompt_messages = [system_prompt, HumanMessage(content=f"Classify this user query: '{query}'")]
    try:
        # vLLM guided decoding: constrain the output to the Pydantic JSON
        # schema so the content parses as an IntentClassification.
        json_schema = IntentClassification.model_json_schema()
        # Fix: use the module's gated debug_print (as everywhere else in this
        # file) instead of bare print(), so logging is consistent.
        debug_print("LLM", "Calling the llama model for structured intent classification...")
        response_obj = await llm1.ainvoke(
            intent_prompt_messages,
            extra_body={
                "guided_json": json_schema,
                "max_tokens": 30,     # intent JSON is tiny; cap the output
                "temperature": 0.0    # deterministic classification
            }
        )
        debug_print("LLM", "Response generated.")
        debug_print("INTENT_RAW_JSON", response_obj.content)
        structured_response = IntentClassification.model_validate_json(response_obj.content)
        intent = structured_response.intent
        debug_print("INTENT", f"Final classification: {intent}")
    except Exception as e:
        # Broad catch is deliberate: any model/validation failure degrades to
        # the safe refusal path instead of crashing the graph.
        debug_print("ERROR", f"Intent classification failed: {e}")
        intent = "out-of-scope"
    return {"intent": intent}
async def general_info_handler_node(state: CreditCardState):
    """
    Handles general, informational credit-card-related queries using the LLaMA model (via vLLM).

    Reads ``state["query"]``, asks the model for a general, educational answer
    (no product recommendations), strips any echoed "Answer:" prefix, and
    returns the reply in ``top_card_description`` with the card-result fields
    cleared.
    """
    debug_print("NODE", f"Entered general_info_handler_node with state:\n {get_pretty_state_string(state)}\n")
    user_query = state["query"]
    prompt_messages = [
        SystemMessage(content="You are a helpful and knowledgeable financial assistant. Your task is to answer the user's general question about credit cards or related financial topics.\n\n- Provide a clear, accurate, and concise answer.\n- Do not recommend any specific credit card products. Keep the response general and educational.\n- Structure your answer for readability.\n"),
        HumanMessage(content=f"Answer:\nUser Query: {user_query}\nAnswer:"),
    ]
    # NOTE(review): sampling params are passed via `config` here but via
    # `extra_body` in intent_classifier_node — confirm which one llm1 honors.
    llm_response = await llm1.ainvoke(
        prompt_messages,
        config={"max_tokens": 200, "temperature": 0.7},
    )
    answer = llm_response.content
    if "Answer:" in answer:
        # Keep only the text after the last echoed "Answer:" marker.
        answer = answer.rpartition("Answer:")[2].strip()
    return {
        "top_card": "",
        "top_card_description": [answer],
        "card_rows": [],
        "card_names": [],
        "card_lookup": {},
    }
async def oos_handler_node(state: CreditCardState):
    """
    Handles out-of-scope queries by using LLaMA (via vLLM) to generate a friendly refusal message.
    It informs the user that only credit card recommendation queries are supported.

    Reads ``state["query"]``, strips any echoed "Assistant:" prefix from the
    reply, and returns the refusal text in ``top_card_description`` with the
    card-result fields cleared.
    """
    debug_print("NODE", f"Entered oos_handler_node with state:\n {get_pretty_state_string(state)}\n")
    user_query = state["query"]
    prompt_messages = [
        SystemMessage(content="You are a specialized assistant for a credit card recommendation service.\nYour ONLY function is to politely decline out-of-scope questions.\n\n- Do NOT answer the user’s question.\n- Do NOT engage in unrelated conversation.\n- Simply explain that your scope is limited to credit card recommendations.\n"),
        HumanMessage(content=f"Assistant:\nUser Query: {user_query}\nAssistant:"),
    ]
    llm_response = await llm1.ainvoke(
        prompt_messages,
        config={"max_tokens": 80, "temperature": 0.7},
    )
    refusal = llm_response.content
    if "Assistant:" in refusal:
        # Keep only the text after the last echoed "Assistant:" marker.
        refusal = refusal.rpartition("Assistant:")[2].strip()
    return {
        "top_card": "",
        "top_card_description": [refusal],
        "card_rows": [],
        "card_names": [],
        "card_lookup": {},
    }