File size: 3,169 Bytes
45e9462 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
from langchain_core.messages import AIMessage
from data import debug_print,llm1
from nodes.intent import get_pretty_state_string,CreditCardState
from langchain_core.messages import AIMessage, SystemMessage, HumanMessage
#generating the final response from the top 5 cards
# Generating the final response from the top 5 cards.
async def agent_node(state: CreditCardState):
    """Produce the final card recommendation for the user's query.

    Reads ``raw_query``, ``preferences`` and ``ranked_cards`` from *state*,
    formats the ranked cards into a single prompt, and asks ``llm1`` (a
    ChatOpenAI-compatible model) to select and justify the best card.

    Args:
        state: Graph state. Expected keys: ``raw_query`` (str),
            ``preferences`` (optional, any printable value) and
            ``ranked_cards`` (list of dicts with ``name``/``description``;
            other shapes are stringified as a fallback).

    Returns:
        A dict with a ``messages`` list holding one AIMessage — the model's
        answer, or on failure an error message flagged via
        ``additional_kwargs={"error": True}`` so downstream nodes can detect it.
    """
    debug_print("NODE", f"Entered agent_node with state:\n{get_pretty_state_string(state)}\n")

    # Use .get() consistently: direct indexing raised KeyError when an
    # upstream node did not populate these keys (ranked_cards already
    # used .get(), so this also makes the lookups consistent).
    full_query = state.get("raw_query", "")
    preferences = state.get("preferences")
    selected_preferences = f"\nUser Preferences: {preferences}\n" if preferences else ""

    ranked_cards_raw = state.get("ranked_cards", [])
    if isinstance(ranked_cards_raw, list):
        card_lines = []
        for idx, card in enumerate(ranked_cards_raw, 1):
            if isinstance(card, dict):
                name = card.get("name", "Unnamed Card")
                desc = card.get("description", "")
                card_lines.append(f"Card {idx}: {name}; {desc}")
            else:
                # Tolerate non-dict entries instead of crashing on card.get();
                # the original already stringified non-list inputs, so extend
                # the same best-effort behavior to malformed list items.
                card_lines.append(f"Card {idx}: {card}")
        ranked_cards = "\n".join(card_lines)
    else:
        ranked_cards = str(ranked_cards_raw).strip()

    input_message = f"""## User Query: {full_query}
{selected_preferences}
Ranked Cards: {ranked_cards}
### Instructions:
You are given a user query and a list of ranked cards.
1. First, assess whether the provided card list is sufficient and relevant to confidently answer the user's query.
- If the list is not relevant or lacks information, state that more information is needed.
2. If the list is relevant, follow these strict rules to select and explain the best one:
1. Analyze the user's need from the given query.
2. Select the best card based only on the details in the descriptions.
3. Explain why it's the best choice (mention only what's explicitly written).
4. Do not assume any benefit that is not stated.
5. Use simple, structured output with no symbols like * or #.
6. If the user asks for FD-based cards or is a beginner, assume all given cards are FD-based and choose the best.
"""

    messages_for_llm_input = [
        SystemMessage(content="You are a credit card recommendation agent."),
        # The current human input message.
        HumanMessage(content=input_message),
    ]

    try:
        debug_print("AGENT", "Starting vLLM agent generation via ChatOpenAI...")
        # NOTE(review): sampling parameters are model kwargs, not
        # RunnableConfig keys — putting them inside config={...} is silently
        # ignored by ChatOpenAI, so pass them directly to ainvoke, which
        # forwards extra kwargs to the underlying generation call.
        response_obj = await llm1.ainvoke(
            messages_for_llm_input,
            max_tokens=512,
            temperature=0.7,
            top_p=0.9,
        )
        response_text = response_obj.content
        debug_print("AGENT", f"Decoded Response (truncated):\n{response_text[:500]}...")
    except Exception as e:
        error_str = str(e)
        debug_print("ERROR", f"Error during generation in agent_node: {error_str}")
        return {
            "messages": [
                AIMessage(
                    content=f"Oops! An error occurred during AI generation: {error_str}",
                    additional_kwargs={"error": True}
                )
            ]
        }

    return {"messages": [AIMessage(content=response_text)]}