cryogenic22 committed on
Commit
2e7bebc
·
verified ·
1 Parent(s): 9e801ae

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +531 -0
app.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import os
import anthropic
# FIX(review): the package on PyPI is "langgraph" (double g); the original
# "langraph" spelling can never import. The submodule paths used below
# (prelude.Container, checkpoint.persist) should also be verified against
# the installed langgraph version — TODO confirm.
from langgraph.graph import Graph, StateGraph
from langgraph.prelude import Container
from langgraph.checkpoint import persist
from langchain_anthropic import ChatAnthropic
import json
from typing import Dict, List, Optional, Any, TypedDict
import time
import pandas as pd
12
+
13
# Set page configuration.
# NOTE: st.set_page_config must be the first Streamlit call in the script;
# this sets the browser tab title/icon and opens the sidebar by default.
st.set_page_config(
    page_title="Thinking Agent System",
    page_icon="🧠",
    layout="wide",
    initial_sidebar_state="expanded"
)
20
+
21
# Session state initialization.
# Seed every key the rest of the script assumes exists; values persist
# across Streamlit reruns, so each is only written on the first run.
_session_defaults = {
    "messages": [],        # chat transcript shown in the Chat tab
    "thinking_logs": [],   # per-step agent thoughts for the trace tab
    "agent_graph": None,   # lazily built compiled graph
    "current_step": 0,     # step counter reset on each query
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
30
if "persona_configs" not in st.session_state:
    # Default persona definitions. Each entry carries the display name,
    # a short description shown in the configuration UI, and the system
    # prompt sent to Claude when that agent runs. All four entries are
    # editable at runtime from the "Agent Configuration" tab.
    st.session_state.persona_configs = {
        "researcher": {
            "name": "Researcher",
            "description": "A careful researcher who examines all angles of a problem",
            "system_prompt": """You are a thoughtful researcher who carefully analyzes questions or problems.
Your role is to break down complex questions, consider different approaches, identify assumptions,
and provide comprehensive analysis. Consider multiple perspectives and potential weaknesses in different lines of reasoning.
Your goal is to explore the problem space thoroughly before jumping to conclusions."""
        },
        "critic": {
            "name": "Critic",
            "description": "Identifies potential issues and challenges with approaches",
            "system_prompt": """You are a thoughtful critic who examines potential flaws or weaknesses in reasoning.
Your role is to find potential issues with an approach, identify hidden assumptions,
and suggest alternative ways of looking at problems.
You're not negative, but constructively critical - your goal is to strengthen the analysis."""
        },
        "synthesizer": {
            "name": "Synthesizer",
            "description": "Pulls together insights into a cohesive response",
            "system_prompt": """You are a thoughtful synthesizer who pulls together different insights into a coherent whole.
Your role is to examine the components of analysis that have been done, identify the key themes and insights,
and create a unified response that incorporates the most important elements.
You should balance detail with clarity, ensuring the final response is both comprehensive and accessible."""
        },
        "meta_agent": {
            "name": "Meta Agent",
            "description": "Coordinates the thinking process across agents",
            "system_prompt": """You are the coordinator of a multi-agent thinking system. Your job is to:
1. Understand the user's question
2. Decide which thinking agents need to be engaged and in what order
3. Pass information between agents as needed
4. Determine when sufficient analysis has been done to provide a response
5. Ensure the process is both thorough and efficient

You have these agents at your disposal:
- Researcher: Breaks down problems and explores them thoroughly
- Critic: Examines potential flaws in reasoning and suggests alternatives
- Synthesizer: Pulls together insights into a cohesive response

Think carefully about how to best deploy these agents for each query."""
        }
    }
74
+
75
# Function to create and configure the Claude client.
def get_claude_client():
    """Return an ``anthropic.Anthropic`` client, or None if no API key is set.

    The key is taken from the ANTHROPIC_API_KEY environment variable first,
    falling back to the key entered in the sidebar settings. When neither is
    available, a Streamlit error banner is shown and None is returned.
    """
    key = os.environ.get(
        "ANTHROPIC_API_KEY",
        st.session_state.get("anthropic_api_key", ""),
    )
    if key:
        return anthropic.Anthropic(api_key=key)
    st.error("Please set your Anthropic API key in the settings.")
    return None
83
+
84
# Function to create a LangChain ChatAnthropic instance.
def get_langchain_claude():
    """Return a configured ``ChatAnthropic`` instance, or None without a key.

    Uses the same key-resolution order as get_claude_client(): environment
    variable first, then the key stored in session state by the settings UI.
    """
    key = os.environ.get(
        "ANTHROPIC_API_KEY",
        st.session_state.get("anthropic_api_key", ""),
    )
    if key:
        return ChatAnthropic(
            model="claude-3-7-sonnet-20250219",
            temperature=0.1,
            anthropic_api_key=key
        )
    st.error("Please set your Anthropic API key in the settings.")
    return None
96
+
97
# Load or create agent graph
def create_agent_graph():
    """Build and compile the multi-agent thinking graph.

    Nodes: ``initialize`` seeds the shared state, ``meta_agent`` decides the
    next step, the three persona nodes (``researcher``, ``critic``,
    ``synthesizer``) each contribute one thought per visit, and ``finalize``
    produces the user-facing answer. Persona prompts are read from
    ``st.session_state.persona_configs`` at call time, so runtime edits made
    in the configuration tab take effect on the next node execution.

    Returns:
        The compiled graph object produced by ``graph.compile()``.
    """
    # Shared state threaded through every node.
    class AgentState(TypedDict):
        query: str                       # the user's original question
        thoughts: Dict[str, List[str]]   # per-agent lists of generated thoughts
        current_agent: str               # routing key read by the conditional edge
        final_response: Optional[str]    # set only by the finalize node
        history: List[Dict[str, Any]]    # ordered {"agent", "thought"} records

    # Initialize the graph
    graph = StateGraph(AgentState)

    def _history_text(state: AgentState) -> str:
        """Render the agents' running dialogue as markdown for the next prompt."""
        sections = []
        for entry in state.get("history") or []:
            if entry.get("agent") and entry.get("thought"):
                sections.append(f"\n## {entry['agent']} thought:\n{entry['thought']}\n")
        return "".join(sections)

    def _run_persona(state: AgentState, persona_key: str, agent_label: str,
                     instruction: str, max_tokens: int) -> AgentState:
        """One persona turn: ask Claude, record the thought, yield to meta_agent.

        Shared implementation for the researcher/critic/synthesizer nodes,
        which previously duplicated this code and differed only in persona
        key, display label, closing instruction, and token budget.
        """
        client = get_claude_client()
        if not client:
            return state

        system_prompt = st.session_state.persona_configs[persona_key]["system_prompt"]

        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system=system_prompt,
            messages=[
                {
                    "role": "user",
                    "content": f"User query: {state['query']}\n\n"
                               f"Current thinking process:\n{_history_text(state)}\n\n"
                               f"{instruction}"
                }
            ],
            temperature=0.1,
            max_tokens=max_tokens
        )

        thought = message.content[0].text

        # Copy-on-write updates keep the incoming state unmutated.
        updated_thoughts = state["thoughts"].copy()
        updated_thoughts[persona_key] = updated_thoughts.get(persona_key, []) + [thought]
        updated_history = state.get("history", []).copy() + [{"agent": agent_label, "thought": thought}]

        return {
            **state,
            "thoughts": updated_thoughts,
            "current_agent": "meta_agent",  # persona agents always yield back to the coordinator
            "history": updated_history
        }

    @graph.node
    def initialize(state: AgentState) -> AgentState:
        """Seed per-run bookkeeping before the first meta-agent step."""
        return {
            **state,
            "thoughts": {"meta_agent": [], "researcher": [], "critic": [], "synthesizer": []},
            "current_agent": "meta_agent",
            "history": []
        }

    @graph.node
    def meta_agent(state: AgentState) -> AgentState:
        """Coordinator: reviews progress and picks the next agent (or 'final')."""
        client = get_claude_client()
        if not client:
            return state

        system_prompt = st.session_state.persona_configs["meta_agent"]["system_prompt"]

        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system=system_prompt,
            messages=[
                {
                    "role": "user",
                    "content": f"User query: {state['query']}\n\n"
                               f"Current thinking process:\n{_history_text(state)}\n\n"
                               f"What should be the next step in the thinking process? Which agent should handle it next, "
                               f"and what specific aspect should they focus on? Or is the analysis sufficient to generate a final response?"
                }
            ],
            temperature=0.1,
            max_tokens=1000
        )

        thought = message.content[0].text

        updated_thoughts = state["thoughts"].copy()
        updated_thoughts["meta_agent"] = updated_thoughts.get("meta_agent", []) + [thought]

        # Route on keyword matches in the coordinator's free-text reply.
        # NOTE(review): brittle — if the model phrases its decision differently,
        # control loops back to meta_agent indefinitely; consider structured
        # output (e.g. a JSON "next_agent" field) instead.
        lowered = thought.lower()
        next_agent = "meta_agent"  # default: let the coordinator think again
        if "researcher should" in lowered or "have the researcher" in lowered:
            next_agent = "researcher"
        elif "critic should" in lowered or "have the critic" in lowered:
            next_agent = "critic"
        elif "synthesizer should" in lowered or "have the synthesizer" in lowered:
            next_agent = "synthesizer"
        elif "final response" in lowered or "sufficient analysis" in lowered:
            next_agent = "final"

        updated_history = state.get("history", []).copy() + [{"agent": "Meta Agent", "thought": thought}]

        return {
            **state,
            "thoughts": updated_thoughts,
            "current_agent": next_agent,
            "history": updated_history
        }

    @graph.node
    def researcher(state: AgentState) -> AgentState:
        """Researcher persona: thorough problem exploration."""
        return _run_persona(state, "researcher", "Researcher",
                            "Please provide your analysis as the Researcher agent.", 1500)

    @graph.node
    def critic(state: AgentState) -> AgentState:
        """Critic persona: probes for flaws and hidden assumptions."""
        return _run_persona(state, "critic", "Critic",
                            "Please provide your critical analysis as the Critic agent.", 1500)

    @graph.node
    def synthesizer(state: AgentState) -> AgentState:
        """Synthesizer persona: merges prior thoughts into a cohesive view."""
        return _run_persona(state, "synthesizer", "Synthesizer",
                            "Please synthesize the insights and provide a cohesive analysis as the Synthesizer agent.", 1500)

    @graph.node
    def finalize(state: AgentState) -> AgentState:
        """Produce the user-facing answer from the accumulated history."""
        client = get_claude_client()
        if not client:
            return state

        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system="You are a thoughtful AI assistant that provides well-reasoned, comprehensive responses.",
            messages=[
                {
                    "role": "user",
                    "content": f"User query: {state['query']}\n\n"
                               f"Here is the complete thinking process that went into answering this query:\n{_history_text(state)}\n\n"
                               f"Based on all of this thinking, provide a final, comprehensive response to the user's query."
                }
            ],
            temperature=0.1,
            max_tokens=2000
        )

        return {
            **state,
            "final_response": message.content[0].text,
            "current_agent": "done"
        }

    # Wiring: initialize -> meta_agent; meta_agent fans out on current_agent;
    # every persona node returns to meta_agent for the next routing decision.
    graph.add_edge("initialize", "meta_agent")
    graph.add_conditional_edges(
        "meta_agent",
        lambda state: state["current_agent"],
        {
            "researcher": "researcher",
            "critic": "critic",
            "synthesizer": "synthesizer",
            "final": "finalize",
            "meta_agent": "meta_agent"  # coordinator may need another step
        }
    )
    graph.add_edge("researcher", "meta_agent")
    graph.add_edge("critic", "meta_agent")
    graph.add_edge("synthesizer", "meta_agent")

    # Compile the graph
    return graph.compile()
369
+
370
# Function to run the agent graph
def run_agent_graph(query):
    """Run the thinking graph on *query* and return the final answer text.

    Side effects: lazily builds the graph, resets the step counter, and
    replaces ``st.session_state.thinking_logs`` with this run's history so
    the "Thinking Process" tab reflects the latest query only.

    Args:
        query: The user's question as entered in the chat input.

    Returns:
        The final response string, or an apology message when the graph
        did not produce one (e.g. missing API key aborted a node).
    """
    if not st.session_state.agent_graph:
        st.session_state.agent_graph = create_agent_graph()

    # Reset per-run UI state.
    st.session_state.current_step = 0
    st.session_state.thinking_logs = []

    # Start with the minimal initial state; the initialize node fills the rest.
    initial_state = {"query": query}

    # Execute the graph with checkpointing so each step is tracked.
    @persist(to="memory")
    def run_with_checkpoints(graph, initial_state):
        return graph.run(initial_state)

    result = run_with_checkpoints(st.session_state.agent_graph, initial_state)

    # Mirror the agent history into session state for the trace tab.
    if result and "history" in result:
        st.session_state.thinking_logs.extend(result["history"])

    # FIX(review): use .get() truthiness instead of `"final_response" in result`
    # — the key can be present with value None when finalize aborted, and the
    # old membership test would then return None to the chat UI.
    if result and result.get("final_response"):
        return result["final_response"]
    return "I wasn't able to generate a response. Please try again or check the settings."
402
+
403
# UI Layout
st.sidebar.title("🧠 Thinking Agent System")

# Tabs for different views: conversation, agent trace, and persona editing.
tabs = st.tabs(["Chat", "Thinking Process", "Agent Configuration"])
408
+
409
with tabs[0]:  # Chat tab
    st.header("Chat with the Thinking Agent")

    # Replay the full conversation on every rerun — Streamlit re-executes
    # the whole script for each interaction.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # User input; the walrus binds the submitted text (if any) for this rerun.
    if prompt := st.chat_input("What's on your mind?"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.write(prompt)

        # Show thinking indicator while the agent graph runs (blocking call).
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = run_agent_graph(prompt)

            # Display the response
            st.write(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
436
+
437
with tabs[1]:  # Thinking Process tab
    st.header("Agent Thinking Process")

    # Each log entry is one agent turn ({"agent", "thought"}) recorded by
    # the last run; render them as numbered, pre-expanded sections.
    if st.session_state.thinking_logs:
        for step_num, log in enumerate(st.session_state.thinking_logs, start=1):
            with st.expander(f"Step {step_num}: {log.get('agent', 'Unknown Agent')}", expanded=True):
                st.markdown(log.get("thought", "No thought recorded"))
    else:
        st.info("No thinking process to display yet. Start a conversation to see the agents at work.")
447
+
448
with tabs[2]:  # Agent Configuration tab
    st.header("Configure Agent Personas")

    # Select agent to configure; the selectbox stores the dict key but
    # displays the human-readable name.
    selected_agent = st.selectbox(
        "Select agent to configure:",
        options=list(st.session_state.persona_configs.keys()),
        format_func=lambda x: st.session_state.persona_configs[x]["name"]
    )

    # Edit the selected agent inside a form so nothing is saved until submit.
    if selected_agent:
        with st.form(f"edit_{selected_agent}"):
            st.subheader(f"Edit {st.session_state.persona_configs[selected_agent]['name']}")

            name = st.text_input(
                "Agent Name",
                value=st.session_state.persona_configs[selected_agent]["name"]
            )

            description = st.text_area(
                "Description",
                value=st.session_state.persona_configs[selected_agent]["description"],
                height=100
            )

            system_prompt = st.text_area(
                "System Prompt",
                value=st.session_state.persona_configs[selected_agent]["system_prompt"],
                height=300
            )

            if st.form_submit_button("Save Changes"):
                st.session_state.persona_configs[selected_agent]["name"] = name
                st.session_state.persona_configs[selected_agent]["description"] = description
                st.session_state.persona_configs[selected_agent]["system_prompt"] = system_prompt

                # Recreate the agent graph with updated configs
                # NOTE(review): node functions read persona_configs at run
                # time, so this rebuild looks redundant but is harmless —
                # confirm before removing.
                st.session_state.agent_graph = create_agent_graph()

                st.success(f"Updated {name} configuration successfully!")
489
+
490
# Settings in the sidebar
with st.sidebar.expander("⚙️ Settings"):
    api_key = st.text_input(
        "Anthropic API Key",
        type="password",
        value=st.session_state.get("anthropic_api_key", ""),
        help="Enter your Anthropic API key here"
    )

    # Persist the key so get_claude_client() can fall back to it.
    if api_key:
        st.session_state.anthropic_api_key = api_key

    if st.button("Test Connection"):
        client = get_claude_client()
        if client:
            try:
                # Minimal one-token round trip just to validate the key;
                # the response itself is not needed (was assigned to an
                # unused variable before).
                client.messages.create(
                    model="claude-3-7-sonnet-20250219",
                    messages=[{"role": "user", "content": "Hello"}],
                    max_tokens=10
                )
            except Exception as e:
                st.error(f"Connection failed: {str(e)}")
            else:
                st.success("Connection successful!")
514
+
515
# Display some information about the app
with st.sidebar.expander("ℹ️ About"):
    # Static help text describing the four agents and the three tabs.
    st.markdown("""
This app demonstrates a multi-agent thinking system powered by Claude 3.7.

The system uses multiple specialized agents with different perspectives to analyze problems:
- **Meta Agent**: Coordinates the thinking process
- **Researcher**: Explores the problem space thoroughly
- **Critic**: Identifies flaws in reasoning
- **Synthesizer**: Combines insights into a coherent response

You can observe the thinking process in the "Thinking Process" tab and configure the agents in the "Agent Configuration" tab.
""")
528
+
529
# Create the agent graph on app startup if it doesn't exist.
# Building the graph only wires nodes and edges; no API calls happen until
# a query is actually run.
if not st.session_state.agent_graph:
    st.session_state.agent_graph = create_agent_graph()