jdesiree committed on
Commit
2e85c0e
·
verified ·
1 Parent(s): 27e06fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +202 -239
app.py CHANGED
@@ -11,11 +11,12 @@ import time
11
  from dotenv import load_dotenv
12
  import logging
13
  import re
14
- from langchain.tools import BaseTool
15
- from langchain.agents import initialize_agent, AgentType
 
 
 
16
  from langchain.memory import ConversationBufferWindowMemory
17
- from langchain.schema import SystemMessage
18
- from langchain.llms.base import LLM
19
  from typing import Optional, List, Any, Type
20
  from pydantic import BaseModel, Field
21
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
@@ -36,103 +37,117 @@ if not hf_token:
36
 
37
  metrics_tracker = MimirMetrics(save_file="Mimir_metrics.json")
38
 
39
- from langchain.tools import Tool
40
  import json
41
 
42
- def create_educational_graph_tool():
43
- """
44
- Creates a graph tool for the AI to autonomously generate educational visualizations.
45
  """
 
46
 
47
- def graph_wrapper(graph_config: str) -> str:
48
- """
49
- Wrapper that calls your generate_plot function with the JSON config.
50
-
51
- Args:
52
- graph_config (str): JSON string with plot configuration
53
-
54
- Returns:
55
- str: HTML with embedded graph
56
- """
57
- try:
58
- # Validate it's proper JSON
59
- config = json.loads(graph_config)
60
-
61
- # Add educational context if provided
62
- educational_context = config.get("educational_context", "")
63
-
64
- # Call your generate_plot function (which now takes single JSON input)
65
- graph_html = generate_plot(graph_config)
66
-
67
- # Add educational context if provided
68
- if educational_context:
69
- context_html = f'<div style="margin: 10px 0; padding: 10px; background: #f8f9fa; border-left: 4px solid #007bff; font-style: italic;">💡 {educational_context}</div>'
70
- return context_html + graph_html
71
-
72
- return graph_html
73
-
74
- except json.JSONDecodeError as e:
75
- logger.error(f"Invalid JSON provided to graph tool: {e}")
76
- return '<p style="color:red;">Graph generation failed - invalid JSON format</p>'
77
- except Exception as e:
78
- logger.error(f"Error in graph generation: {e}")
79
- return f'<p style="color:red;">Error creating graph: {str(e)}</p>'
80
 
81
- return Tool(
82
- name="create_graph",
83
- description="""Create educational graphs and charts to help explain concepts to students.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- Use this tool when teaching concepts that would benefit from visual representation, such as:
86
- - Mathematical functions and relationships (quadratic equations, exponential growth)
87
- - Statistical distributions and data analysis (normal curves, survey results)
88
- - Scientific trends and comparisons (temperature changes, population growth)
89
- - Economic models and business metrics (profit over time, market shares)
90
- - Grade distributions or performance analysis (test score ranges)
91
- - Any quantitative concept that's clearer with visualization
92
 
93
- Input should be a JSON string with this structure:
94
- {
95
- "data": {"Category A": 25, "Category B": 40, "Category C": 35},
96
- "plot_type": "bar",
97
- "title": "Student Performance by Subject",
98
- "x_label": "Subjects",
99
- "y_label": "Average Score",
100
- "educational_context": "This visualization helps students see performance patterns across subjects"
101
- }
102
 
103
- Plot types:
104
- - "bar": Best for comparing categories, showing distributions, or discrete data
105
- - "line": Best for showing trends over time or continuous relationships
106
- - "pie": Best for showing parts of a whole or proportions
107
 
108
- Always create meaningful educational data that illustrates the concept you're teaching.
109
- Include educational_context to explain why the visualization helps learning.
110
 
111
- Examples of when to use:
112
- - Student asks about probability Create histogram showing dice roll outcomes
113
- - Teaching compound interest Line chart showing money growth over time
114
- - Explaining survey methods → Pie chart of sample demographic breakdown
115
- - Discussing functions Line plot comparing linear vs exponential growth
116
- - Analyzing test performance Bar chart of score distributions
117
- """,
118
- func=graph_wrapper
119
- )
120
-
121
- # If you want to keep using your existing CreateGraphTool approach,
122
- # update it to work with the new single-input generate_plot function:
123
- class CreateGraphToolFixed(BaseTool):
124
- name: str = "create_graph"
125
- description: str = """Generate educational graphs to supplement explanations. Create realistic data that illustrates educational concepts."""
126
 
127
- def _run(self, tool_input: str) -> str:
128
- """Single input version that works with your updated generate_plot function."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  try:
130
- # tool_input should be the JSON config string
131
- return generate_plot(tool_input)
 
 
 
 
 
 
 
 
 
 
 
 
132
  except Exception as e:
133
- logger.error(f"Error in graph tool: {e}")
134
- return f'<p style="color:red;">Error creating graph: {str(e)}</p>'
135
-
136
 
137
  # --- System Prompt ---
138
  SYSTEM_PROMPT = """You are Mimir, an expert multi-concept tutor designed to facilitate genuine learning and understanding. Your primary mission is to guide students through the learning process rather than providing direct answers to academic work.
@@ -162,12 +177,8 @@ You recognize that students may seek direct answers to homework, assignments, or
162
  - **Encourage original thinking**: Help students develop their own reasoning and analytical skills
163
  - **Suggest study strategies**: Recommend effective learning approaches for the subject matter
164
 
165
- # Visual Learning Enhancement
166
-
167
- Thought: You should always think about what to do; do not use any tool if it is not needed.
168
-
169
  You have the ability to create graphs and charts to enhance your explanations. Use this capability proactively when:
170
-
171
  - Explaining mathematical concepts (functions, distributions, relationships)
172
  - Teaching statistical analysis or data interpretation
173
  - Discussing scientific trends, patterns, or experimental results
@@ -176,42 +187,13 @@ You have the ability to create graphs and charts to enhance your explanations. U
176
  - Showing survey results, demographic data, or research findings
177
  - Demonstrating any concept where visualization aids comprehension
178
 
179
- **When to create graphs:**
180
- - The concept involves numerical data or relationships
181
- - Visual representation would clarify a complex idea
182
- - Students benefit from seeing patterns or comparisons
183
- - You're teaching about graph interpretation itself
184
- - The topic involves trends, distributions, or proportions
185
- - A multiple choice question requires a visual, such as a graph, to be answered.
186
-
187
- **How to create graphs:**
188
- Generate realistic, educational data that illustrates your teaching point. Create meaningful examples that help students understand the underlying concepts, not just random numbers.
189
-
190
- Example: When explaining normal distribution, create a graph showing test scores distributed normally around a mean, with appropriate labels and educational context.
191
-
192
-
193
- ## Tool Usage
194
- You have access to a create_graph tool. Use this tool naturally when a visual representation would enhance understanding or when discussing concepts that involve data, relationships, patterns, or quantitative information. Consider creating graphs for:
195
- - Mathematical concepts (functions, distributions, relationships)
196
- - Statistical examples and explanations
197
- - Scientific data and relationships
198
- - Practice problems involving graph interpretation
199
- - Comparative analyses
200
- - Economic models or business concepts
201
- - Any situation where visualization aids comprehension
202
-
203
- When using the create_graph tool, format data as JSON strings:
204
- - data_json: '{"Category1": 25, "Category2": 40, "Category3": 35}'
205
- - labels_json: '["Category1", "Category2", "Category3"]'
206
-
207
- Thought: You should always think about what to do; do not use any tool if it is not needed.
208
 
209
  ## Response Guidelines
210
  - **For math problems**: Explain concepts, provide formula derivations, and guide through problem-solving steps without computing final numerical answers
211
  - **For multiple-choice questions**: Discuss the concepts being tested and help students understand how to analyze options rather than identifying the correct choice
212
  - **For essays or written work**: Discuss research strategies, organizational techniques, and critical thinking approaches rather than providing content or thesis statements
213
  - **For factual questions**: Provide educational context and encourage students to synthesize information rather than stating direct answers
214
- - Use graphs naturally when they would clarify or enhance your explanations
215
 
216
  ## Communication Guidelines
217
  - Maintain a supportive, non-judgmental tone in all interactions
@@ -223,26 +205,11 @@ Thought: You should always think about what to do; do not use any tool if it is
223
 
224
  Your goal is to be an educational partner who empowers students to succeed through understanding, not a service that completes their work for them."""
225
 
226
- # --- Improved LangChain Setup ---
227
- # Global flag to track system prompt initialization
228
- system_prompt_initialized = False
229
-
230
- def initialize_system_prompt(agent):
231
- """Initialize the system prompt as a SystemMessage in memory."""
232
- global system_prompt_initialized
233
- if not system_prompt_initialized:
234
- system_message = SystemMessage(content=SYSTEM_PROMPT)
235
- agent.memory.chat_memory.add_message(system_message)
236
- system_prompt_initialized = True
237
-
238
  logger = logging.getLogger(__name__)
239
 
240
- class Qwen25SmallLLM(LLM):
241
- model: Any = None
242
- tokenizer: Any = None
243
-
244
  def __init__(self, model_path: str = "Qwen/Qwen2.5-3B-Instruct", use_4bit: bool = True):
245
- super().__init__()
246
  logger.info(f"Loading model: {model_path} (use_4bit={use_4bit})")
247
 
248
  try:
@@ -292,7 +259,7 @@ class Qwen25SmallLLM(LLM):
292
  low_cpu_mem_usage=True
293
  )
294
 
295
- def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
296
  try:
297
  messages = [
298
  {"role": "system", "content": SYSTEM_PROMPT},
@@ -323,94 +290,98 @@ class Qwen25SmallLLM(LLM):
323
  logger.error(f"Generation error: {e}")
324
  return f"[Error generating response: {str(e)}]"
325
 
326
- @property
327
- def _llm_type(self) -> str:
328
- return "qwen25_small"
329
-
330
-
331
- # Example of how the AI should use the tool
332
- def example_usage_for_ai():
333
- """
334
- This shows how the AI should autonomously create graphs in its responses.
335
- The AI doesn't wait for user data - it creates meaningful educational examples.
336
- """
337
 
338
- # Example: Teaching about normal distribution
339
- example_config = {
340
- "data": {
341
- "Below 60": 5,
342
- "60-70": 15,
343
- "70-80": 25,
344
- "80-90": 35,
345
- "90-100": 20
346
- },
347
- "plot_type": "bar",
348
- "title": "Typical Test Score Distribution",
349
- "x_label": "Score Range",
350
- "y_label": "Number of Students",
351
- "educational_context": "This shows how test scores often follow a bell-curve pattern, with most students scoring in the middle range."
352
- }
353
 
354
- # Example: Teaching about compound interest
355
- compound_interest_example = {
356
- "data": {
357
- "Year 1": 1000,
358
- "Year 5": 1276,
359
- "Year 10": 1629,
360
- "Year 15": 2079,
361
- "Year 20": 2653
362
- },
363
- "plot_type": "line",
364
- "title": "Compound Interest Growth ($1000 at 5% Annual)",
365
- "x_label": "Time (Years)",
366
- "y_label": "Account Value ($)",
367
- "educational_context": "Notice how the growth accelerates over time - this is the power of compound interest!"
368
- }
369
 
370
- return "These examples show how the AI creates educational data to illustrate concepts"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
371
 
372
  # --- Global Agent Instance ---
373
  agent = None
374
 
375
  def get_agent():
376
- """Get or create the LangChain agent."""
377
  global agent
378
  if agent is None:
379
- agent = create_langchain_agent()
380
  return agent
381
 
382
- def create_langchain_agent():
383
- """Factory to build the LangChain agent with memory and tools."""
384
- try:
385
- # Initialize your LLM
386
- llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-3B-Instruct")
387
-
388
- # Memory
389
- memory = ConversationBufferWindowMemory(
390
- memory_key="chat_history",
391
- return_messages=True,
392
- k=5 # keep last 5 exchanges
393
- )
394
-
395
- # Tools (graph tool, etc.)
396
- tools = [create_educational_graph_tool()]
397
-
398
- # Initialize agent
399
- agent = initialize_agent(
400
- tools=tools,
401
- llm=llm,
402
- agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
403
- memory=memory,
404
- verbose=True,
405
- handle_parsing_errors=True
406
- )
407
-
408
- return agent
409
-
410
- except Exception as e:
411
- logger.error(f"Error creating LangChain agent: {e}")
412
- raise
413
-
414
  # --- UI: MathJax Configuration ---
415
  mathjax_config = '''
416
  <script>
@@ -473,24 +444,21 @@ def smart_truncate(text, max_length=3000):
473
  words = text[:max_length].split()
474
  return ' '.join(words[:-1]) + "... [Response truncated]"
475
 
476
- def generate_response_with_langchain(message, max_retries=3):
477
- """Generate response using LangChain agent with proper message handling."""
478
 
479
  for attempt in range(max_retries):
480
  try:
481
  # Get the agent
482
  current_agent = get_agent()
483
 
484
- # Initialize system prompt if not already done
485
- initialize_system_prompt(current_agent)
486
-
487
- # Use the agent directly with the message
488
- response = current_agent.run(input=message)
489
 
490
  return smart_truncate(response)
491
 
492
  except Exception as e:
493
- logger.error(f"LangChain error (attempt {attempt + 1}): {e}")
494
  if attempt < max_retries - 1:
495
  time.sleep(2)
496
  continue
@@ -520,8 +488,8 @@ def chat_response(message, history=None):
520
  except Exception as metrics_error:
521
  logger.error(f"Error in metrics_tracker.log_interaction: {metrics_error}")
522
 
523
- # Generate response with LangChain
524
- response = generate_response_with_langchain(message)
525
 
526
  # Log final metrics
527
  try:
@@ -541,7 +509,7 @@ def chat_response(message, history=None):
541
  return f"I apologize, but I encountered an error while processing your message: {str(e)}"
542
 
543
  def respond_and_update(message, history):
544
- """Main function to handle user submission - no voice parameter."""
545
  if not message.strip():
546
  return history, ""
547
 
@@ -556,11 +524,10 @@ def respond_and_update(message, history):
556
  yield history, ""
557
 
558
  def clear_chat():
559
- """Clear the chat history and reset system prompt flag."""
560
- global agent, system_prompt_initialized
561
  if agent is not None:
562
  agent.memory.clear()
563
- system_prompt_initialized = False
564
  return [], ""
565
 
566
  def warmup_agent():
@@ -569,14 +536,11 @@ def warmup_agent():
569
  try:
570
  current_agent = get_agent()
571
 
572
- # Initialize system prompt
573
- initialize_system_prompt(current_agent)
574
-
575
  # Run a simple test query
576
- test_response = current_agent.run(input="Hello, this is a warmup test.")
577
  logger.info(f"Agent warmup completed successfully! Test response length: {len(test_response)} chars")
578
 
579
- # Clear the test interaction from memory
580
  current_agent.memory.clear()
581
 
582
  except Exception as e:
@@ -640,7 +604,7 @@ def create_interface():
640
  send = gr.Button("Send", elem_classes=["send-button"], size="sm")
641
  clear = gr.Button("Clear", elem_classes=["clear-button"], size="sm")
642
 
643
- # Event handlers - no voice parameter
644
  msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
645
  send.click(respond_and_update, [msg, chatbot], [chatbot, msg])
646
  clear.click(clear_chat, outputs=[chatbot, msg])
@@ -651,7 +615,6 @@ def create_interface():
651
  return demo
652
 
653
  # --- Main Execution ---
654
- # At the end of your app.py file, replace the main execution block:
655
  if __name__ == "__main__":
656
  try:
657
  logger.info("=" * 50)
@@ -661,7 +624,7 @@ if __name__ == "__main__":
661
  # Step 1: Preload the model and agent
662
  logger.info("Loading AI model...")
663
  start_time = time.time()
664
- agent = create_langchain_agent()
665
  load_time = time.time() - start_time
666
  logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
667
 
 
11
  from dotenv import load_dotenv
12
  import logging
13
  import re
14
+ from langchain_core.tools import tool
15
+ from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
16
+ from langchain_core.prompts import ChatPromptTemplate
17
+ from langchain_core.runnables import RunnableBranch
18
+ from langgraph.prebuilt import create_react_agent
19
  from langchain.memory import ConversationBufferWindowMemory
 
 
20
  from typing import Optional, List, Any, Type
21
  from pydantic import BaseModel, Field
22
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
37
 
38
  metrics_tracker = MimirMetrics(save_file="Mimir_metrics.json")
39
 
 
40
  import json
41
 
42
@tool(return_direct=False)
def Create_Graph_Tool(graph_config: str) -> str:
    """
    Creates educational graphs and charts to help explain concepts to students.

    Use this tool ONLY when teaching concepts that would benefit from visual representation, such as:
    - Mathematical functions and relationships (quadratic equations, exponential growth)
    - Statistical distributions and data analysis (normal curves, survey results)
    - Scientific trends and comparisons (temperature changes, population growth)
    - Economic models and business metrics (profit over time, market shares)
    - Grade distributions or performance analysis (test score ranges)
    - Any quantitative concept that's clearer with visualization

    Input should be a JSON string with this structure:
    {
    "data": {"Category A": 25, "Category B": 40, "Category C": 35},
    "plot_type": "bar",
    "title": "Student Performance by Subject",
    "x_label": "Subjects",
    "y_label": "Average Score",
    "educational_context": "This visualization helps students see performance patterns across subjects"
    }

    Plot types:
    - "bar": Best for comparing categories, showing distributions, or discrete data
    - "line": Best for showing trends over time or continuous relationships
    - "pie": Best for showing parts of a whole or proportions

    Always create meaningful educational data that illustrates the concept you're teaching.
    Include educational_context to explain why the visualization helps learning.
    """
    try:
        # Parse up front so a malformed config fails fast with a clear message.
        parsed = json.loads(graph_config)
        context_note = parsed.get("educational_context", "")

        # generate_plot consumes the raw JSON string, not the parsed dict.
        plot_html = generate_plot(graph_config)

        if not context_note:
            return plot_html

        # Prepend an italic call-out banner explaining the pedagogy of the plot.
        banner = (
            '<div style="margin: 10px 0; padding: 10px; background: #f8f9fa; '
            f'border-left: 4px solid #007bff; font-style: italic;">💡 {context_note}</div>'
        )
        return banner + plot_html

    except json.JSONDecodeError as exc:
        logger.error(f"Invalid JSON provided to graph tool: {exc}")
        return '<p style="color:red;">Graph generation failed - invalid JSON format</p>'
    except Exception as exc:
        logger.error(f"Error in graph generation: {exc}")
        return f'<p style="color:red;">Error creating graph: {str(exc)}</p>'
96
+
97
# --- Tool Decision Engine ---
class Tool_Decision_Engine:
    """Uses LLM to intelligently decide when visualization tools would be beneficial.

    Wraps an LLM (anything exposing ``invoke(str) -> str``) and asks it a fixed
    YES/NO classification question about the incoming query.
    """

    def __init__(self, llm):
        # LLM used only for the YES/NO routing decision, not for answering.
        self.decision_llm = llm
        # Runtime prompt text — kept verbatim; "{query}" is filled per call.
        self.decision_prompt = """Analyze this educational query and determine if creating a graph, chart, or visual representation would significantly enhance learning and understanding.

Query: "{query}"

Consider these factors:
1. Would visualization make a concept clearer or easier to understand?
2. Does the topic involve data, relationships, comparisons, or trends?
3. Could a graph help illustrate abstract concepts concretely?
4. For practice questions, would including visual elements be educational?

Examples that BENEFIT from visualization:
- Explaining mathematical functions or statistical concepts
- Creating practice questions that involve data interpretation
- Teaching about scientific trends or relationships
- Comparing quantities, performance, or outcomes
- Illustrating economic principles or business metrics

Examples that do NOT need visualization:
- Simple definitions or explanations
- General conversation or greetings
- Text-based study strategies
- Qualitative discussions without data

Answer with exactly: YES or NO

Decision:"""

    def should_use_visualization(self, query: str) -> bool:
        """Return True when the LLM judges that a graph would aid this query.

        Any failure (LLM error, unparseable answer) conservatively returns
        False so the caller falls back to a tool-free response.
        """
        try:
            decision_query = self.decision_prompt.format(query=query)

            # Get LLM decision (Qwen25SmallLLM.invoke returns a plain string).
            decision_response = self.decision_llm.invoke(decision_query)
            decision_text = decision_response.strip().upper()

            # Log the decision for debugging
            logger.info(f"Tool decision for '{query[:50]}...': {decision_text}")

            # Take the FIRST standalone YES/NO token as the verdict.
            # The previous check ('"YES" in text and "NO" not in text') was
            # defeated by substrings such as "NOTE"/"KNOW"/"EYES" and returned
            # False for any verbose answer that mentioned both tokens.
            verdict = re.search(r"\b(YES|NO)\b", decision_text)
            return verdict is not None and verdict.group(1) == "YES"

        except Exception as e:
            logger.error(f"Error in tool decision making: {e}")
            # Default to no tools if decision fails
            return False
151
 
152
  # --- System Prompt ---
153
  SYSTEM_PROMPT = """You are Mimir, an expert multi-concept tutor designed to facilitate genuine learning and understanding. Your primary mission is to guide students through the learning process rather than providing direct answers to academic work.
 
177
  - **Encourage original thinking**: Help students develop their own reasoning and analytical skills
178
  - **Suggest study strategies**: Recommend effective learning approaches for the subject matter
179
 
180
+ ## Visual Learning Enhancement
 
 
 
181
  You have the ability to create graphs and charts to enhance your explanations. Use this capability proactively when:
 
182
  - Explaining mathematical concepts (functions, distributions, relationships)
183
  - Teaching statistical analysis or data interpretation
184
  - Discussing scientific trends, patterns, or experimental results
 
187
  - Showing survey results, demographic data, or research findings
188
  - Demonstrating any concept where visualization aids comprehension
189
 
190
+ **Important**: Only use the graph tool when visualization would genuinely help explain a concept. For general conversation, explanations, or questions that don't involve data or relationships, respond normally without tools.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
 
192
  ## Response Guidelines
193
  - **For math problems**: Explain concepts, provide formula derivations, and guide through problem-solving steps without computing final numerical answers
194
  - **For multiple-choice questions**: Discuss the concepts being tested and help students understand how to analyze options rather than identifying the correct choice
195
  - **For essays or written work**: Discuss research strategies, organizational techniques, and critical thinking approaches rather than providing content or thesis statements
196
  - **For factual questions**: Provide educational context and encourage students to synthesize information rather than stating direct answers
 
197
 
198
  ## Communication Guidelines
199
  - Maintain a supportive, non-judgmental tone in all interactions
 
205
 
206
  Your goal is to be an educational partner who empowers students to succeed through understanding, not a service that completes their work for them."""
207
 
208
+ # --- LLM Class Unchanged ---
 
 
 
 
 
 
 
 
 
 
 
209
  logger = logging.getLogger(__name__)
210
 
211
+ class Qwen25SmallLLM:
 
 
 
212
  def __init__(self, model_path: str = "Qwen/Qwen2.5-3B-Instruct", use_4bit: bool = True):
 
213
  logger.info(f"Loading model: {model_path} (use_4bit={use_4bit})")
214
 
215
  try:
 
259
  low_cpu_mem_usage=True
260
  )
261
 
262
+ def invoke(self, prompt: str, stop: Optional[List[str]] = None) -> str:
263
  try:
264
  messages = [
265
  {"role": "system", "content": SYSTEM_PROMPT},
 
290
  logger.error(f"Generation error: {e}")
291
  return f"[Error generating response: {str(e)}]"
292
 
293
# --- Modern Agent Implementation ---
class Educational_Agent:
    """Modern LangChain agent with LLM-based tool decision making.

    Routing: every incoming message is first classified by Tool_Decision_Engine;
    queries judged to benefit from visualization go through a tool-enabled
    react agent, everything else goes straight to the local LLM.
    """

    def __init__(self):
        # One local model instance shared by the answering path and the
        # YES/NO tool-decision path.
        self.llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-3B-Instruct")
        self.tool_decision_engine = Tool_Decision_Engine(self.llm)
        # NOTE(review): this memory is cleared by clear_chat()/warmup but is
        # never read or written by chat() below, so conversation history does
        # not reach the model — confirm whether that is intended.
        self.memory = ConversationBufferWindowMemory(
            memory_key="chat_history",
            return_messages=True,
            k=5
        )

    def should_use_tools(self, query: str) -> bool:
        """Use LLM reasoning to determine if tools are needed for this query."""
        return self.tool_decision_engine.should_use_visualization(query)

    def create_prompt_template(self, has_tools: bool = False):
        """Create prompt template based on whether tools are available."""
        if has_tools:
            system_content = SYSTEM_PROMPT + "\n\nYou have access to graph creation tools. Use them when visualization would help explain concepts."
        else:
            system_content = SYSTEM_PROMPT + "\n\nRespond using your knowledge without any tools."

        # NOTE(review): the "{input}" human slot is never formatted anywhere in
        # this class — process_with_tools hands raw messages to the react
        # agent instead. Verify the template is consumed as expected.
        return ChatPromptTemplate.from_messages([
            ("system", system_content),
            ("human", "{input}")
        ])

    def process_with_tools(self, query: str) -> str:
        """Process query with the graph tool available; returns final text/HTML."""
        try:
            # Create agent with tools
            tools = [Create_Graph_Tool]

            # Use create_react_agent for better control
            # NOTE(review): create_react_agent normally expects a LangChain
            # chat-model object; Qwen25SmallLLM is a plain wrapper with a
            # str -> str invoke() — confirm compatibility at runtime.
            agent = create_react_agent(
                self.llm,
                tools,
                state_modifier=self.create_prompt_template(has_tools=True)
            )

            response = agent.invoke({"messages": [HumanMessage(content=query)]})

            # Extract the final message content from the returned state dict.
            if response and "messages" in response:
                final_message = response["messages"][-1]
                if hasattr(final_message, 'content'):
                    return final_message.content
                else:
                    return str(final_message)

            # Fallback: unexpected response shape — stringify rather than fail.
            return str(response)

        except Exception as e:
            logger.error(f"Error in tool processing: {e}")
            return f"I apologize, but I encountered an error while processing your request: {str(e)}"

    def process_without_tools(self, query: str) -> str:
        """Process query without tools — a direct LLM call."""
        try:
            response = self.llm.invoke(query)
            return response
        except Exception as e:
            logger.error(f"Error in normal processing: {e}")
            return f"I apologize, but I encountered an error: {str(e)}"

    def chat(self, message: str) -> str:
        """Main chat interface: route to the tool or no-tool path and return text."""
        try:
            # Determine if tools are needed (LLM-based YES/NO classification).
            if self.should_use_tools(message):
                logger.info("Query requires visualization - enabling tools")
                return self.process_with_tools(message)
            else:
                logger.info("Query doesn't need tools - responding normally")
                return self.process_without_tools(message)

        except Exception as e:
            logger.error(f"Error in chat processing: {e}")
            return f"I apologize, but I encountered an error: {str(e)}"
374
 
375
# --- Global Agent Instance ---
agent = None

def get_agent():
    """Return the process-wide Educational_Agent, building it on first use."""
    global agent
    if agent is not None:
        return agent
    agent = Educational_Agent()
    return agent
384
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
385
  # --- UI: MathJax Configuration ---
386
  mathjax_config = '''
387
  <script>
 
444
  words = text[:max_length].split()
445
  return ' '.join(words[:-1]) + "... [Response truncated]"
446
 
447
+ def generate_response_with_agent(message, max_retries=3):
448
+ """Generate response using modern agent with proper tool control."""
449
 
450
  for attempt in range(max_retries):
451
  try:
452
  # Get the agent
453
  current_agent = get_agent()
454
 
455
+ # Use the agent's chat method with conditional tool usage
456
+ response = current_agent.chat(message)
 
 
 
457
 
458
  return smart_truncate(response)
459
 
460
  except Exception as e:
461
+ logger.error(f"Agent error (attempt {attempt + 1}): {e}")
462
  if attempt < max_retries - 1:
463
  time.sleep(2)
464
  continue
 
488
  except Exception as metrics_error:
489
  logger.error(f"Error in metrics_tracker.log_interaction: {metrics_error}")
490
 
491
+ # Generate response with modern agent
492
+ response = generate_response_with_agent(message)
493
 
494
  # Log final metrics
495
  try:
 
509
  return f"I apologize, but I encountered an error while processing your message: {str(e)}"
510
 
511
  def respond_and_update(message, history):
512
+ """Main function to handle user submission."""
513
  if not message.strip():
514
  return history, ""
515
 
 
524
  yield history, ""
525
 
526
def clear_chat():
    """Reset the conversation: wipe the agent's memory and blank the chat UI."""
    global agent
    if agent is None:
        return [], ""
    agent.memory.clear()
    return [], ""
532
 
533
  def warmup_agent():
 
536
  try:
537
  current_agent = get_agent()
538
 
 
 
 
539
  # Run a simple test query
540
+ test_response = current_agent.chat("Hello, this is a warmup test.")
541
  logger.info(f"Agent warmup completed successfully! Test response length: {len(test_response)} chars")
542
 
543
+ # Clear any test data from memory
544
  current_agent.memory.clear()
545
 
546
  except Exception as e:
 
604
  send = gr.Button("Send", elem_classes=["send-button"], size="sm")
605
  clear = gr.Button("Clear", elem_classes=["clear-button"], size="sm")
606
 
607
+ # Event handlers
608
  msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
609
  send.click(respond_and_update, [msg, chatbot], [chatbot, msg])
610
  clear.click(clear_chat, outputs=[chatbot, msg])
 
615
  return demo
616
 
617
  # --- Main Execution ---
 
618
  if __name__ == "__main__":
619
  try:
620
  logger.info("=" * 50)
 
624
  # Step 1: Preload the model and agent
625
  logger.info("Loading AI model...")
626
  start_time = time.time()
627
+ agent = Educational_Agent()
628
  load_time = time.time() - start_time
629
  logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
630