File size: 6,727 Bytes
b35a113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
973314e
b35a113
c17585b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b35a113
 
 
 
973314e
b35a113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
from langgraph.graph import StateGraph, START, END
from typing import TypedDict, Annotated
from langchain_core.messages import HumanMessage, BaseMessage, AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.tools import tool
from dotenv import load_dotenv

# Load environment variables from a local .env file — presumably the OpenAI
# and Tavily API keys; confirm against deployment config.
load_dotenv()

# =====================================================
# STATE
# =====================================================
class ChatState(TypedDict):
    """Graph state: the running conversation history.

    The `add_messages` reducer merges each node's returned messages into the
    existing list instead of overwriting it, which is what gives the graph
    cross-turn memory when combined with the checkpointer.
    """
    messages: Annotated[list[BaseMessage], add_messages]


# =====================================================
# TOOL DEFINITIONS
# =====================================================

@tool
def tavily_search(query: str) -> dict:
    """
    Perform a web search using Tavily.
    Use this when up-to-date or factual information is required.
    """
    # NOTE(review): the docstring above doubles as the tool description that
    # @tool sends to the LLM, so it is runtime behavior — edit with care.
    try:
        # Fresh client per call; it presumably reads TAVILY_API_KEY from the
        # environment loaded by load_dotenv() — confirm.
        search = TavilySearchResults(max_results=5)
        results = search.run(query)
        return {"query": query, "results": results}
    except Exception as e:
        # Best-effort: return the failure as data rather than raising, so the
        # agent can see the error and respond gracefully.
        return {"error": str(e)}


# Tool set advertised to the LLM and executed by the graph's ToolNode.
tools = [tavily_search]


# =====================================================
# LLM CONFIGURATION
# =====================================================
# Chat model with token streaming enabled; bind_tools attaches the tool
# schemas so the model can emit tool calls that tools_condition routes on.
llm = ChatOpenAI(
    model="gpt-4.1-nano",
    streaming=True
).bind_tools(tools)


# =====================================================
# SYSTEM PROMPT (MINIMALLY ENHANCED)
# =====================================================
# Persona / behavior prompt prepended to every model call in chat_node.
# The content is a runtime string sent to the LLM — wording changes alter
# assistant behavior, so treat edits here as behavior changes.
SYSTEM_MESSAGE = SystemMessage(
    content=(
        "You are RiskGuard AI — an intelligent, production-grade credit risk and financial guidance assistant.\n\n"

        "You were designed and engineered by Junaid, an AI and Machine Learning practitioner with strong experience "
        "in data science, predictive modeling, and building real-world AI systems. "
        "This platform evolved from a simple classification model into a full-featured AI system with conversational intelligence, "
        "memory, real-time reasoning, and voice interaction.\n\n"

        "Your purpose is to help users understand their financial situation in a clear, calm, and human way — "
        "similar to how a knowledgeable financial advisor would explain things during a conversation.\n\n"

        "When responding, follow these principles:\n"
        "- Speak naturally and conversationally (avoid sounding like documentation)\n"
        "- Structure responses logically, but avoid numbered or rigid lists unless clearly helpful\n"
        "- Keep explanations clear, friendly, and easy to follow\n"
        "- Use short paragraphs instead of long blocks of text\n"
        "- Explain *why* something matters, not just *what* it is\n\n"

        "Your role includes:\n"
        "- Helping users understand their credit risk and financial standing\n"
        "- Explaining model-driven insights in plain language\n"
        "- Offering practical, realistic improvement suggestions\n"
        "- Supporting follow-up questions with context awareness\n\n"

        "You may reference system capabilities when useful, such as:\n"
        "- AI-based credit risk analysis\n"
        "- Context-aware conversations using memory\n"
        "- Real-time responses with streaming\n"
        "- Speech-to-text and text-to-speech interactions\n\n"

        "However, avoid technical deep dives unless the user explicitly asks for them.\n\n"

        "Tone guidelines:\n"
        "- Calm, professional, and reassuring\n"
        "- Confident but not authoritative\n"
        "- Helpful without being overwhelming\n\n"

        "Ethical guidelines:\n"
        "- Do not provide legal, tax, or investment guarantees\n"
        "- Avoid speculation or assumptions\n"
        "- Encourage informed and responsible decision-making\n\n"

        "Your ultimate goal is to help users feel informed, confident, and supported, "
        "while reflecting the quality and professionalism of a real-world financial AI system."
    )
)



# =====================================================
# CHAT NODE
# =====================================================
def chat_node(state: ChatState):
    """Run the LLM over the system prompt plus the accumulated conversation.

    Returns a single-message update; the add_messages reducer appends it to
    the state's history.
    """
    prompt = [SYSTEM_MESSAGE, *state["messages"]]
    reply = llm.invoke(prompt)
    return {"messages": [reply]}


# =====================================================
# GRAPH SETUP
# =====================================================
# In-memory checkpointer: conversation state persists per thread_id for the
# lifetime of the process.
checkpointer = MemorySaver()

graph = StateGraph(ChatState)

graph.add_node("chat_node", chat_node)
graph.add_node("tools", ToolNode(tools))

# Routing logic.
# tools_condition already routes chat_node -> "tools" when the last AI
# message contains tool calls and chat_node -> END otherwise, so no separate
# unconditional chat_node -> END edge is needed. The original file added one,
# which duplicated the END path and scheduled END even on turns that were
# routed to the tool node.
graph.add_edge(START, "chat_node")
graph.add_conditional_edges("chat_node", tools_condition)
graph.add_edge("tools", "chat_node")

financial_advisor_chatbot = graph.compile(checkpointer=checkpointer)


# =====================================================
# INPUT FORMATTER
# =====================================================
def format_chat_input(probability, credit_score, rating, advisor_reply, user_message):
    """Assemble the structured prompt combining model outputs and the user's message.

    Produces a plain-text report (leading and trailing newline included) with
    the credit analysis figures, the advisor summary, and the user's message,
    ready to be wrapped in a HumanMessage.
    """
    sections = [
        "",
        "CREDIT ANALYSIS",
        "---------------",
        f"Credit Score: {credit_score}",
        f"Default Probability: {probability:.2%}",
        f"Rating: {rating}",
        "",
        "AI ADVISOR SUMMARY:",
        f"{advisor_reply}",
        "",
        "USER MESSAGE:",
        f"{user_message}",
        "",
    ]
    return "\n".join(sections)


# =====================================================
# NORMAL RESPONSE (NON-STREAMING)
# =====================================================
def ask_chatbot(probability, credit_score, rating, advisor_reply, user_message, thread_id):
    """Run one non-streaming turn and return the assistant's final reply text.

    thread_id selects the checkpointer thread, so repeated calls with the same
    id continue the same conversation.
    """
    prompt_text = format_chat_input(
        probability,
        credit_score,
        rating,
        advisor_reply,
        user_message,
    )

    run_config = {"configurable": {"thread_id": thread_id}}
    final_state = financial_advisor_chatbot.invoke(
        {"messages": [HumanMessage(content=prompt_text)]},
        config=run_config,
    )

    return final_state["messages"][-1].content


# =====================================================
# STREAMING RESPONSE
# =====================================================
def ask_chatbot_stream(probability, credit_score, rating, advisor_reply, user_message, thread_id):
    """Stream the assistant's reply, yielding content chunks as they arrive.

    Bug fixed: the original iterated `.stream()` in its default mode
    ("updates"), whose events are dicts keyed by *node name*, so the
    `"messages" in event` check never matched and the generator yielded
    nothing. stream_mode="messages" emits (message_chunk, metadata) pairs
    token-by-token, which is what a streaming UI needs (and matches
    streaming=True on the LLM).
    """
    formatted = format_chat_input(
        probability,
        credit_score,
        rating,
        advisor_reply,
        user_message
    )

    state = {"messages": [HumanMessage(content=formatted)]}
    config = {"configurable": {"thread_id": thread_id}}

    for chunk, _metadata in financial_advisor_chatbot.stream(
        state, config=config, stream_mode="messages"
    ):
        # AIMessageChunk subclasses AIMessage; the isinstance check skips
        # tool results and other non-assistant traffic.
        if isinstance(chunk, AIMessage) and chunk.content:
            yield chunk.content