cryogenic22 committed on
Commit
b24c73d
·
verified ·
1 Parent(s): 08ad87f

Create utils/langgraph_conversation.py

Browse files
Files changed (1) hide show
  1. utils/langgraph_conversation.py +164 -0
utils/langgraph_conversation.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # utils/langgraph_conversation.py
2
+
3
+ from langgraph.graph import StateGraph, END
4
+ from langchain.memory import ConversationBufferMemory
5
+ from langchain.prompts import ChatPromptTemplate
6
+ from langchain.chat_models import ChatAnthropic
7
+ from langchain.schema import HumanMessage, AIMessage
8
+ import streamlit as st
9
+
10
class ConversationalLearningGraph:
    """LangGraph-driven conversational pipeline for trading education.

    Pipeline stages:
        understand_question -> check_prerequisites -> generate_response
        -> suggest_next_topics -> END

    ``check_prerequisites`` routes conditionally: back to
    ``understand_question`` when prerequisite knowledge is missing,
    otherwise on to ``generate_response``.
    """

    def __init__(self, anthropic_api_key):
        """Initialize the LLM, conversation memory, and compiled graph.

        Args:
            anthropic_api_key: API key forwarded to ``ChatAnthropic``.
        """
        self.llm = ChatAnthropic(anthropic_api_key=anthropic_api_key)
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
        )
        self.graph = self._create_graph()

    def _create_graph(self):
        """Build and compile the conversation state graph.

        Returns:
            The compiled LangGraph workflow (supports ``ainvoke``).
        """
        # BUG FIX: StateGraph has no `from_empty()` classmethod; its
        # constructor takes a state schema. A plain `dict` schema matches the
        # `{**state, ...}` update style used by every node below.
        workflow = StateGraph(dict)

        # Nodes for the conversation stages.
        workflow.add_node("understand_question", self._understand_question)
        workflow.add_node("check_prerequisites", self._check_prerequisites)
        workflow.add_node("generate_response", self._generate_response)
        workflow.add_node("suggest_next_topics", self._suggest_next_topics)

        # BUG FIX: the original never declared a start node, so the compiled
        # graph had no entry point and could not run.
        workflow.set_entry_point("understand_question")

        workflow.add_edge("understand_question", "check_prerequisites")

        # BUG FIX: the original added BOTH an unconditional edge
        # check_prerequisites -> generate_response AND conditional edges out
        # of the same node, which conflict. Routing out of
        # check_prerequisites must be conditional only.
        workflow.add_conditional_edges(
            "check_prerequisites",
            self._needs_prerequisites,
            {
                True: "understand_question",   # Loop back if prerequisites needed
                False: "generate_response",    # Continue if prerequisites met
            },
        )

        workflow.add_edge("generate_response", "suggest_next_topics")
        workflow.add_edge("suggest_next_topics", END)

        return workflow.compile()

    async def _understand_question(self, state):
        """Analyze and categorize the incoming question.

        Args:
            state: Graph state; reads ``state['question']``.

        Returns:
            State extended with ``question_analysis`` (raw LLM text) and
            ``category`` (one of the categories in ``_categorize_question``).
        """
        question = state['question']

        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are an expert at understanding trading questions."),
            ("human", "Analyze this trading question: {question}")
        ])

        response = await self.llm.ainvoke(prompt.format_messages(question=question))

        return {
            **state,
            "question_analysis": response.content,
            "category": self._categorize_question(response.content),
        }

    def _check_prerequisites(self, state):
        """Decide whether the user needs prerequisite knowledge first.

        Args:
            state: Graph state; reads ``state['category']``.

        Returns:
            State extended with the boolean ``needs_prerequisites``, which
            drives the conditional routing in ``_create_graph``.
        """
        history = self.memory.chat_memory.messages
        return {
            **state,
            "needs_prerequisites": self._evaluate_prerequisites(
                state['category'],
                history,
            ),
        }

    async def _generate_response(self, state):
        """Generate the detailed educational answer.

        Args:
            state: Graph state; reads ``question`` and ``category``.

        Returns:
            State extended with ``response`` (the LLM's answer text).
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are an expert trading educator."),
            ("human", """Given this trading question and context:
Question: {question}
Category: {category}
Previous discussion: {history}

Provide a detailed, educational response.""")
        ])

        response = await self.llm.ainvoke(
            prompt.format_messages(
                question=state['question'],
                category=state['category'],
                history=self.memory.chat_memory.messages,
            )
        )

        return {
            **state,
            "response": response.content,
        }

    async def _suggest_next_topics(self, state):
        """Suggest related follow-up topics based on the answer just given.

        Args:
            state: Graph state; reads ``question`` and ``response``.

        Returns:
            State extended with ``next_topics`` (LLM suggestion text).
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", "Suggest related trading topics to explore next."),
            ("human", """Based on:
Current topic: {question}
Response given: {response}

Suggest 3 related topics to explore next.""")
        ])

        suggestions = await self.llm.ainvoke(
            prompt.format_messages(
                question=state['question'],
                response=state['response'],
            )
        )

        return {
            **state,
            "next_topics": suggestions.content,
        }

    def _needs_prerequisites(self, state):
        """Routing predicate: True when prerequisite teaching is required.

        Defaults to False when the key is absent, so the graph proceeds to
        response generation rather than looping.
        """
        return state.get('needs_prerequisites', False)

    def _categorize_question(self, analysis):
        """Categorize the question type from the LLM's analysis text.

        Returns:
            One of: basic_concepts, technical_analysis, risk_management,
            trading_strategy, market_mechanics.
        """
        categories = [
            "basic_concepts",
            "technical_analysis",
            "risk_management",
            "trading_strategy",
            "market_mechanics",
        ]
        # TODO: implement real categorization logic over `analysis`.
        return "basic_concepts"  # Placeholder

    def _evaluate_prerequisites(self, category, history):
        """Evaluate whether the user needs prerequisites for `category`.

        Args:
            category: The question category string.
            history: Prior chat messages from memory.
        """
        # TODO: implement prerequisite checking logic; False means the graph
        # never loops back to understand_question (safe default — no
        # possibility of an infinite loop until real logic lands).
        return False  # Placeholder

    async def process_question(self, question):
        """Process a question through the conversation graph.

        Args:
            question: The user's trading question.

        Returns:
            Dict with ``response`` (the answer) and ``next_topics``
            (suggested follow-ups).
        """
        # Record the user turn before running the graph so the
        # prerequisite/response nodes see it in memory.
        self.memory.chat_memory.add_user_message(question)

        initial_state = {
            "question": question,
            "memory": self.memory,
        }

        # BUG FIX: compiled LangGraph graphs expose `ainvoke`, not `arun`;
        # the original raised AttributeError at runtime.
        final_state = await self.graph.ainvoke(initial_state)

        self.memory.chat_memory.add_ai_message(final_state['response'])

        return {
            'response': final_state['response'],
            'next_topics': final_state['next_topics'],
        }