"""LangGraph nodes for the credit-card assistant chat flow.

Contains the card-details tool, a visible chain-of-thought router that
decides whether the tool must be called, the tool-execution node, and the
expert agent node that composes the final reply.
"""

import pprint
from typing import List

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode

from data import all_card_lookup, debug_print, eligibility_lookup, llm1
from nodes.intent import CreditCardState
from pydantic_schema import RouterResult


# Chat node with tool
@tool
def fetch_card_details(card_names: List[str]) -> str:
    """Fetches details for a list of credit cards from the database.

    Use this tool to get information about specific credit cards when the user
    asks about them by name. This is useful for comparing cards or getting
    details about cards not in the initial recommendation.

    Args:
        card_names: A list of exact credit card names to fetch details for.

    Returns:
        A string containing the details of the requested cards.
    """
    debug_print("TOOL", f"fetch_card_details called with cards: {card_names}")

    # Empty-input guard. In the original this check sat *after* the loop,
    # where it could only ever fire for an empty card_names list, because
    # every requested card gets a fallback entry appended regardless of
    # whether it was actually found.
    if not card_names:
        return "Could not find details for the requested cards. Please check the card names."

    # Case-insensitive views of the lookup tables, rebuilt per call so that
    # capitalization differences in LLM-produced card names still resolve.
    case_insensitive_desc_lookup = {k.lower(): v for k, v in all_card_lookup.items()}
    case_insensitive_elig_lookup = {k.lower(): v for k, v in eligibility_lookup.items()}

    details = []
    for card_name in card_names:
        lookup_key = card_name.lower()
        description = case_insensitive_desc_lookup.get(lookup_key, "Description not found.")
        eligibility = case_insensitive_elig_lookup.get(
            lookup_key, "No eligibility or fee information available."
        )
        details.append(
            f"Card: {card_name}\nDescription: {description}\n\n"
            f"Eligibility & Fees:\n{eligibility}"
        )

    return "\n\n---\n\n".join(details)


CHAT_TOOLS = [fetch_card_details]
chat_tool_node = ToolNode(CHAT_TOOLS)


# decides if tool call is required
async def router_node(state: CreditCardState) -> dict:
    """Decide whether new card data must be fetched before answering.

    Runs a two-step visible chain-of-thought: first generate a free-form
    reasoning scratchpad from the user's latest message, then constrain a
    second LLM call (via guided JSON) to the ``RouterResult`` schema based
    on that scratchpad.

    Args:
        state: Graph state; reads ``card_names`` and ``messages``.

    Returns:
        A partial state update containing ``router_decision``.
    """
    debug_print("NODE", "Entering VISIBLE Chain-of-Thought router_node")
    known_card_names = state['card_names']
    # The most recent message is assumed to be the user's query.
    user_query = state['messages'][-1].content
    known_cards_sentence = ", ".join(f"'{name}'" for name in known_card_names)

    # Step 1: prompt to generate the scratchpad/reasoning (free-form text).
    think_prompt = f"""
You are an expert routing agent. Your job is to analyze a user's query and write down your reasoning for whether a new credit card needs to be fetched.

**Follow these steps:**
1. **Identify:** Find the specific credit card names mentioned in the User Query.
2. **Compare:** Check if those names exist in the list of Known Card Names.
3. **Conclude:** State your final conclusion about whether a tool is needed.

**Known Card Names :** We already have information on the following cards: {known_cards_sentence}.

**User Query:** "{user_query}"

**Your Reasoning Scratchpad:**
"""
    debug_print("CoT", "Generating reasoning scratchpad...")
    reasoning_response = await llm1.ainvoke([HumanMessage(content=think_prompt)])
    scratchpad = reasoning_response.content
    debug_print("CoT", f"Generated Scratchpad:\n---SCRATCHPAD---\n{scratchpad}\n----------------")

    # Step 2: turn the scratchpad into a structured decision.
    decide_prompt = f"""
Based on the following reasoning scratchpad, provide your final decision in the required JSON format.

**Required Format**
1. `decision`: This must be either "call_tool" or "answer_from_context".
2. `card_names_to_fetch`: If the decision is "call_tool", this must be a list of the new card names found in the query. Otherwise, it should be null.

**Reasoning Scratchpad:**
{scratchpad}

**Final JSON Output:**
"""
    json_schema = RouterResult.model_json_schema()
    debug_print("CoT", "Generating final decision from scratchpad...")
    # guided_json constrains the model's output to the RouterResult schema.
    final_response = await llm1.ainvoke(
        [HumanMessage(content=decide_prompt)],
        extra_body={"guided_json": json_schema}
    )
    router_decision = RouterResult.model_validate_json(final_response.content)
    debug_print("ROUTER", f"Final Decision: {router_decision}")
    return {"router_decision": router_decision}


# calls the tool
def tool_node(state: CreditCardState) -> dict:
    """Execute ``fetch_card_details`` for the router's requested cards.

    Args:
        state: Graph state; reads ``router_decision.card_names_to_fetch``.

    Returns:
        A partial state update with ``new_card_info`` (``None`` when the
        router requested no cards).
    """
    debug_print("NODE", "Entering tool_node")
    card_names = state['router_decision'].card_names_to_fetch
    if not card_names:
        return {"new_card_info": None}
    new_info = fetch_card_details.invoke({"card_names": card_names})
    return {"new_card_info": new_info}


# combines the tool results and generates the final reply
async def expert_agent_node(state: CreditCardState) -> dict:
    """Compose the final reply, folding any freshly fetched card info.

    Takes the UI-supplied system prompt (assumed to be the first message),
    appends any ``new_card_info`` produced by the tool node, and sends the
    combined system prompt plus chat history to the LLM.

    Args:
        state: Graph state; reads ``messages`` and ``new_card_info``.

    Returns:
        A partial state update appending the LLM response to ``messages``.
    """
    print("\n" + "="*60)
    debug_print("EXPERT_AGENT_ENTRY", "Full state entering expert_agent_node:")
    pprint.pprint(state, indent=2)
    print("="*60 + "\n")

    # First message carries the system prompt from the UI; the rest is history.
    system_prompt_from_ui = state['messages'][0].content
    new_card_info = state.get("new_card_info")
    chat_history = state['messages'][1:]

    if new_card_info:
        final_system_prompt = system_prompt_from_ui + "\n\n\n" + new_card_info + "\n"
    else:
        final_system_prompt = system_prompt_from_ui

    messages_to_send = [SystemMessage(content=final_system_prompt)] + chat_history

    print("\n" + "-"*60)
    debug_print("EXPERT_AGENT_PROMPT", "Final prompt being sent to LLM:")
    pprint.pprint(messages_to_send)
    print("-"*60 + "\n")

    response = await llm1.ainvoke(messages_to_send)
    return {"messages": [response]}