jdesiree commited on
Commit
2ebfa62
·
verified ·
1 Parent(s): 636a860

Update graph_tool.py

Browse files
Files changed (1) hide show
  1. graph_tool.py +603 -108
graph_tool.py CHANGED
@@ -1,126 +1,621 @@
1
- import base64
2
- from io import BytesIO
3
- import matplotlib.pyplot as plt
4
- import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import json
6
 
7
def generate_plot(plot_config_json):
    """
    Generates a plot (bar, line, or pie) and returns it as an HTML-formatted Base64-encoded image string.

    Args:
        plot_config_json (str): JSON string containing all plot configuration:
            {
                "data": {"key1": value1, "key2": value2, ...},
                "labels": ["label1", "label2", ...],   (optional; used by pie charts)
                "plot_type": "bar|line|pie",
                "title": "Plot Title",
                "x_label": "X Axis Label" (optional),
                "y_label": "Y Axis Label" (optional)
            }

    Returns:
        str: An HTML img tag with the Base64-encoded plot image, or an HTML
        <p> error message when the configuration is invalid.
    """
    try:
        # Parse the main JSON configuration
        config = json.loads(plot_config_json)
    except json.JSONDecodeError as e:
        return f'<p style="color:red;">Error parsing JSON configuration: {e}</p>'

    # Extract parameters with defaults
    try:
        data = config.get("data", {})
        labels = config.get("labels", [])
        plot_type = config.get("plot_type", "bar")
        title = config.get("title", "Untitled Plot")
        x_label = config.get("x_label", "")
        y_label = config.get("y_label", "")
    except Exception as e:
        return f'<p style="color:red;">Error extracting configuration parameters: {e}</p>'

    # Validate ALL inputs *before* any figure is created. Previously the
    # figure was opened first, so the "non-numeric data" and "invalid
    # plot_type" early returns leaked an open matplotlib figure each call.
    if not isinstance(data, dict):
        return '<p style="color:red;">Data must be a dictionary with keys as labels and values as numbers.</p>'

    if not isinstance(labels, list):
        return '<p style="color:red;">Labels must be a list.</p>'

    # Extract keys and values from the data dictionary
    x_data = list(data.keys())
    try:
        # Ensure all values are numeric
        y_data = [float(val) for val in data.values()]
    except (ValueError, TypeError):
        return '<p style="color:red;">All data values must be numeric.</p>'

    if plot_type not in ('bar', 'line', 'pie'):
        return f'<p style="color:red;">Invalid plot_type: {plot_type}. Choose "bar", "line", or "pie".</p>'

    try:
        fig, ax = plt.subplots(figsize=(10, 6))

        if plot_type == 'bar':
            bars = ax.bar(x_data, y_data)
            ax.set_xlabel(x_label)
            ax.set_ylabel(y_label)

            # Add value labels on top of bars
            for bar, value in zip(bars, y_data):
                height = bar.get_height()
                ax.text(bar.get_x() + bar.get_width()/2., height,
                        f'{value}', ha='center', va='bottom')

        elif plot_type == 'line':
            ax.plot(x_data, y_data, marker='o', linewidth=2, markersize=6)
            ax.set_xlabel(x_label)
            ax.set_ylabel(y_label)
            ax.grid(True, alpha=0.3)

        else:  # 'pie' — the only remaining value after validation above
            # For pie charts, use labels parameter if it matches, otherwise use data keys
            pie_labels = labels if len(labels) == len(y_data) else x_data
            wedges, texts, autotexts = ax.pie(y_data, labels=pie_labels, autopct='%1.1f%%',
                                              startangle=90, textprops={'fontsize': 10})
            ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle
            # No x/y labels for pie charts as they don't make sense

        ax.set_title(title, fontsize=14, fontweight='bold', pad=20)

        # Improve layout
        plt.tight_layout()

        # Save plot to a BytesIO buffer in memory
        buf = BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight', dpi=150,
                    facecolor='white', edgecolor='none')
        plt.close(fig)  # Close the plot to free up memory

        # Encode the image data to a Base64 string
        img_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

        # Return HTML img tag with proper styling
        return f'''
        <div style="text-align: center; margin: 20px 0;">
            <img src="data:image/png;base64,{img_base64}"
                 style="max-width: 100%; height: auto; border-radius: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.1);"
                 alt="{title}" />
        </div>
        '''

    except Exception as e:
        plt.close('all')  # Clean up any open figures
        return f'<p style="color:red;">Error generating plot: {str(e)}</p>'

# Example usage:
# plot_config = {
#     "data": {"A": 10, "B": 20, "C": 15},
#     "labels": ["Category A", "Category B", "Category C"],
#     "plot_type": "bar",
#     "title": "Sample Bar Chart",
#     "x_label": "Categories",
#     "y_label": "Values"
# }
# result = generate_plot(json.dumps(plot_config))
 
1
import gradio as gr
# NOTE(review): per the commit metadata this file *is* graph_tool.py — if so,
# `from graph_tool import generate_plot` is a self-import that would fail at
# startup, because the new file contents no longer define generate_plot. This
# looks like app.py content committed over graph_tool.py; verify the filename.
from graph_tool import generate_plot
from metrics import MimirMetrics
import os

# Redirect all Hugging Face caches to a writable tmp dir. Set before the
# `transformers` import below so the cache location takes effect.
os.environ['HF_HOME'] = '/tmp/huggingface'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/huggingface'
os.environ['HF_DATASETS_CACHE'] = '/tmp/huggingface'

import time
from dotenv import load_dotenv
import logging
import re
from langchain_core.tools import tool
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableBranch
from langgraph.prebuilt import create_react_agent
from langchain.memory import ConversationBufferWindowMemory
from typing import Optional, List, Any, Type
from pydantic import BaseModel, Field
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

# Load secrets from .env; accept either of the two common token names.
load_dotenv(".env")
HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
print("Environment variables loaded.")

# --- Environment and Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Support both token names for flexibility
hf_token = HF_TOKEN
if not hf_token:
    logger.warning("Neither HF_TOKEN nor HUGGINGFACEHUB_API_TOKEN is set, the application may not work.")

# Metrics sink shared by the chat handlers below.
metrics_tracker = MimirMetrics(save_file="Mimir_metrics.json")

import json
41
 
42
# The docstring below is kept verbatim: LangChain sends it to the LLM as the
# tool description, so its wording is runtime behavior, not just docs.
@tool(return_direct=False)
def Create_Graph_Tool(graph_config: str) -> str:
    """
    Creates educational graphs and charts to help explain concepts to students.

    Use this tool ONLY when teaching concepts that would benefit from visual representation, such as:
    - Mathematical functions and relationships (quadratic equations, exponential growth)
    - Statistical distributions and data analysis (normal curves, survey results)
    - Scientific trends and comparisons (temperature changes, population growth)
    - Economic models and business metrics (profit over time, market shares)
    - Grade distributions or performance analysis (test score ranges)
    - Any quantitative concept that's clearer with visualization

    Input should be a JSON string with this structure:
    {
        "data": {"Category A": 25, "Category B": 40, "Category C": 35},
        "plot_type": "bar",
        "title": "Student Performance by Subject",
        "x_label": "Subjects",
        "y_label": "Average Score",
        "educational_context": "This visualization helps students see performance patterns across subjects"
    }

    Plot types:
    - "bar": Best for comparing categories, showing distributions, or discrete data
    - "line": Best for showing trends over time or continuous relationships
    - "pie": Best for showing parts of a whole or proportions

    Always create meaningful educational data that illustrates the concept you're teaching.
    Include educational_context to explain why the visualization helps learning.
    """
    try:
        # Fail fast on malformed JSON before handing it to the plotter.
        parsed = json.loads(graph_config)
        note = parsed.get("educational_context", "")

        # Actual rendering is delegated to graph_tool.generate_plot, which
        # re-parses the same JSON string itself.
        rendered = generate_plot(graph_config)

        if not note:
            return rendered

        # Prepend the pedagogical note as a styled callout above the chart.
        callout = f'<div style="margin: 10px 0; padding: 10px; background: #f8f9fa; border-left: 4px solid #007bff; font-style: italic;">💡 {note}</div>'
        return callout + rendered

    except json.JSONDecodeError as e:
        logger.error(f"Invalid JSON provided to graph tool: {e}")
        return '<p style="color:red;">Graph generation failed - invalid JSON format</p>'
    except Exception as e:
        logger.error(f"Error in graph generation: {e}")
        return f'<p style="color:red;">Error creating graph: {str(e)}</p>'
96
+
97
# --- Tool Control Logic ---
class Tool_Manager:
    """Decides whether the graph-creation tools should be offered for a query."""

    def __init__(self):
        # NOTE(review): this keyword list is not consulted by
        # needs_visualization() below (only the regex patterns are); kept for
        # backward compatibility with any external readers of the attribute.
        self.graph_keywords = [
            'graph', 'chart', 'plot', 'visualiz', 'diagram', 'show me',
            'data', 'statistic', 'trend', 'distribution', 'comparison'
        ]

    def needs_visualization(self, query: str) -> bool:
        """Return True when the query explicitly asks for a visualization."""
        lowered = query.lower()

        # Explicit visualization requests, matched case-insensitively.
        request_patterns = (
            r'\b(graph|chart|plot|visualiz)\w*',
            r'\bshow\s+me\s+(a\s+)?(graph|chart)',
            r'\b(data|statistic)\w*\s+(analys|visual)',
            r'\bcreate\s+(a\s+)?(graph|chart|diagram)',
            r'\bmake\s+(a\s+)?(graph|chart)',
            r'\bdraw\s+(a\s+)?(graph|chart)'
        )
        return any(re.search(pattern, lowered) for pattern in request_patterns)
126
+
127
# --- System Prompt ---
# Runtime string sent to the model on every request (and extended with a
# tools/no-tools suffix by Educational_Agent.create_prompt_template), so the
# wording below is behavior — kept verbatim.
SYSTEM_PROMPT = """You are Mimir, an expert multi-concept tutor designed to facilitate genuine learning and understanding. Your primary mission is to guide students through the learning process rather than providing direct answers to academic work.

## Core Educational Principles
- Provide comprehensive, educational responses that help students truly understand concepts
- Use minimal formatting, with markdown bolding reserved for **key terms** only
- Prioritize teaching methodology over answer delivery
- Foster critical thinking and independent problem-solving skills

## Tone and Communication Style
- Maintain an engaging, friendly tone appropriate for high school students
- Write at a reading level that is accessible yet intellectually stimulating
- Be supportive and encouraging without being condescending
- Never use crude language or content inappropriate for an educational setting
- Avoid preachy, judgmental, or accusatory language
- Skip flattery and respond directly to questions
- Do not use emojis or actions in asterisks unless specifically requested
- Present critiques and corrections kindly as educational opportunities

## Academic Integrity Approach
You recognize that students may seek direct answers to homework, assignments, or test questions. Rather than providing complete solutions or making accusations about intent, you should:

- **Guide through processes**: Break down problems into conceptual components and teach underlying principles
- **Ask clarifying questions**: Understand what the student already knows and where their confusion lies
- **Provide similar examples**: Work through analogous problems that demonstrate the same concepts without directly solving their specific assignment
- **Encourage original thinking**: Help students develop their own reasoning and analytical skills
- **Suggest study strategies**: Recommend effective learning approaches for the subject matter

## Visual Learning Enhancement
You have the ability to create graphs and charts to enhance your explanations. Use this capability proactively when:
- Explaining mathematical concepts (functions, distributions, relationships)
- Teaching statistical analysis or data interpretation
- Discussing scientific trends, patterns, or experimental results
- Comparing different options, outcomes, or scenarios
- Illustrating economic principles, business metrics, or financial concepts
- Showing survey results, demographic data, or research findings
- Demonstrating any concept where visualization aids comprehension

**Important**: Only use the graph tool when visualization would genuinely help explain a concept. For general conversation, explanations, or questions that don't involve data or relationships, respond normally without tools.

## Response Guidelines
- **For math problems**: Explain concepts, provide formula derivations, and guide through problem-solving steps without computing final numerical answers
- **For multiple-choice questions**: Discuss the concepts being tested and help students understand how to analyze options rather than identifying the correct choice
- **For essays or written work**: Discuss research strategies, organizational techniques, and critical thinking approaches rather than providing content or thesis statements
- **For factual questions**: Provide educational context and encourage students to synthesize information rather than stating direct answers

## Communication Guidelines
- Maintain a supportive, non-judgmental tone in all interactions
- Assume positive intent while redirecting toward genuine learning
- Use Socratic questioning to promote discovery and critical thinking
- Celebrate understanding and progress in the learning process
- Encourage students to explain their thinking and reasoning
- Provide honest, accurate feedback even when it may not be what the student wants to hear

Your goal is to be an educational partner who empowers students to succeed through understanding, not a service that completes their work for them."""
182
+
183
# --- LLM Class Unchanged ---
# NOTE(review): re-binds the module logger already created in the setup block
# above — harmless but redundant.
logger = logging.getLogger(__name__)
185
+
186
class Qwen25SmallLLM:
    """Thin wrapper around a locally loaded Qwen2.5 instruct model.

    Loads the model eagerly (quantized when possible) and exposes a single
    blocking `invoke(prompt) -> str` method that runs one chat completion
    with SYSTEM_PROMPT as the system message.
    """

    def __init__(self, model_path: str = "Qwen/Qwen2.5-3B-Instruct", use_4bit: bool = True):
        logger.info(f"Loading model: {model_path} (use_4bit={use_4bit})")

        try:
            # Load tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

            if use_4bit:
                quant_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_compute_dtype=torch.bfloat16,
                    bnb_4bit_use_double_quant=True,
                    bnb_4bit_quant_type="nf4"
                )
                logger.info("Using 4-bit quantization with BitsAndBytes")
            else:
                quant_config = BitsAndBytesConfig(
                    load_in_8bit=True,
                    llm_int8_enable_fp32_cpu_offload=True
                )
                logger.info("Using 8-bit quantization with BitsAndBytes")

            # Try quantized load
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                quantization_config=quant_config,
                device_map="auto",
                torch_dtype=torch.bfloat16,
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

        except Exception as e:
            # NOTE(review): if the *tokenizer* load is what failed, the
            # fallback below only reloads the model and self.tokenizer stays
            # unset, so the pad-token check will raise — confirm intent.
            logger.warning(f"Quantized load failed, falling back: {e}")
            self._load_fallback_model(model_path)

        # Ensure pad token exists so padding=True in invoke() works.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _load_fallback_model(self, model_path: str):
        """Fallback if quantization fails: plain fp16 (GPU) / fp32 (CPU) load."""
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True,
            low_cpu_mem_usage=True
        )

    def invoke(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Generate one completion for `prompt`.

        `stop` is accepted for interface compatibility but not used.
        Returns the decoded new tokens, or an "[Error ...]" string on failure.
        """
        try:
            messages = [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt}
            ]
            text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

            inputs = self.tokenizer([text], return_tensors="pt", padding=True, truncation=True, max_length=2048)
            # Always move tensors to the model's device (no-op on CPU).
            # FIX: the original converted `inputs` to a plain dict only on
            # CUDA and then read `inputs.input_ids` below, which raises
            # AttributeError on a dict — so every GPU generation returned the
            # error string. Indexing with ["input_ids"] works for both cases.
            inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=800,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9,
                    top_k=50,
                    repetition_penalty=1.1,
                    pad_token_id=self.tokenizer.eos_token_id
                )

            # Strip the prompt tokens from each sequence before decoding.
            new_tokens = [out[len(inp):] for inp, out in zip(inputs["input_ids"], outputs)]
            return self.tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()

        except Exception as e:
            logger.error(f"Generation error: {e}")
            return f"[Error generating response: {str(e)}]"
267
+
268
# --- Modern Agent Implementation ---
class Educational_Agent:
    """Routes each query either through a tool-enabled react agent (when the
    Tool_Manager detects a visualization request) or straight to the LLM.
    """

    def __init__(self):
        # Loads the Qwen model eagerly — construction is expensive.
        self.llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-3B-Instruct")
        self.tool_manager = Tool_Manager()
        # NOTE(review): this memory is cleared by the UI handlers but never
        # fed into the prompts built below, so past turns do not reach the
        # model — confirm whether that is intentional.
        self.memory = ConversationBufferWindowMemory(
            memory_key="chat_history",
            return_messages=True,
            k=5
        )

    def should_use_tools(self, query: str) -> bool:
        """Determine if this query needs visualization tools."""
        return self.tool_manager.needs_visualization(query)

    def create_prompt_template(self, has_tools: bool = False):
        """Create a prompt template whose system message reflects tool availability."""
        if has_tools:
            system_content = SYSTEM_PROMPT + "\n\nYou have access to graph creation tools. Use them when visualization would help explain concepts."
        else:
            system_content = SYSTEM_PROMPT + "\n\nRespond using your knowledge without any tools."

        return ChatPromptTemplate.from_messages([
            ("system", system_content),
            ("human", "{input}")
        ])

    def process_with_tools(self, query: str) -> str:
        """Process query with the graph tool available; returns a plain string."""
        try:
            # Create agent with tools
            tools = [Create_Graph_Tool]

            # NOTE(review): create_react_agent usually expects a LangChain
            # chat-model Runnable; Qwen25SmallLLM is a plain wrapper with
            # invoke(str) -> str, and state_modifier is handed a
            # ChatPromptTemplate with an {input} slot here — verify both
            # against the installed langgraph version.
            agent = create_react_agent(
                self.llm,
                tools,
                state_modifier=self.create_prompt_template(has_tools=True)
            )

            response = agent.invoke({"messages": [HumanMessage(content=query)]})

            # Extract the final message content from the agent state.
            if response and "messages" in response:
                final_message = response["messages"][-1]
                if hasattr(final_message, 'content'):
                    return final_message.content
                else:
                    return str(final_message)

            return str(response)

        except Exception as e:
            logger.error(f"Error in tool processing: {e}")
            return f"I apologize, but I encountered an error while processing your request: {str(e)}"

    def process_without_tools(self, query: str) -> str:
        """Process query by calling the LLM directly, with no tools."""
        try:
            response = self.llm.invoke(query)
            return response
        except Exception as e:
            logger.error(f"Error in normal processing: {e}")
            return f"I apologize, but I encountered an error: {str(e)}"

    def chat(self, message: str) -> str:
        """Main chat entry point: picks the tool or no-tool path per message."""
        try:
            # Determine if tools are needed
            if self.should_use_tools(message):
                logger.info("Query requires visualization - enabling tools")
                return self.process_with_tools(message)
            else:
                logger.info("Query doesn't need tools - responding normally")
                return self.process_without_tools(message)

        except Exception as e:
            logger.error(f"Error in chat processing: {e}")
            return f"I apologize, but I encountered an error: {str(e)}"
349
+
350
# --- Global Agent Instance ---
# Single shared agent; None until first requested (or preloaded in __main__).
agent = None

def get_agent():
    """Return the process-wide Educational_Agent, constructing it lazily."""
    global agent
    if agent is not None:
        return agent
    agent = Educational_Agent()
    return agent
359
+
360
# --- UI: MathJax Configuration ---
# Injected into the page as-is; a MutationObserver re-typesets math whenever
# Gradio swaps chat content in.
# NOTE(review): displayMath lists single '$' delimiters, which MathJax
# conventionally treats as *inline* math — confirm this is intended.
mathjax_config = '''
<script>
window.MathJax = {
    tex: {
        inlineMath: [['\\\\(', '\\\\)']],
        displayMath: [['$', '$'], ['\\\\[', '\\\\]']],
        packages: {'[+]': ['ams']}
    },
    svg: {fontCache: 'global'},
    startup: {
        ready: () => {
            MathJax.startup.defaultReady();
            // Re-render math when new content is added
            const observer = new MutationObserver(function(mutations) {
                MathJax.typesetPromise();
            });
            observer.observe(document.body, {childList: true, subtree: true});
        }
    }
};
</script>
'''

# --- HTML Head Content ---
# Basic meta tags and page title, injected via gr.HTML in create_interface().
html_head_content = '''
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Mimir - AI Educational Assistant</title>
'''

# --- Force Light Mode Script ---
# Redirects once with ?__theme=light so Gradio always renders the light theme.
force_light_mode = '''
<script>
// Force light theme in Gradio
window.addEventListener('DOMContentLoaded', function () {
    const gradioURL = window.location.href;
    const url = new URL(gradioURL);
    const currentTheme = url.searchParams.get('__theme');

    if (currentTheme !== 'light') {
        url.searchParams.set('__theme', 'light');
        window.location.replace(url.toString());
    }
});
</script>
'''
407
+
408
# --- Core Logic Functions ---
def smart_truncate(text, max_length=3000):
    """Truncate *text* to at most ~max_length chars at a natural boundary.

    Prefers cutting after the last complete sentence within the limit;
    falls back to the last complete word. Short text is returned unchanged.
    """
    if len(text) <= max_length:
        return text

    head = text[:max_length]

    # Prefer a sentence boundary (keep the punctuation via lookbehind).
    pieces = re.split(r'(?<=[.!?])\s+', head)
    if len(pieces) > 1:
        return ' '.join(pieces[:-1]) + "... [Response truncated - ask for continuation]"

    # No sentence break found — drop the (possibly cut) final word instead.
    tokens = head.split()
    return ' '.join(tokens[:-1]) + "... [Response truncated]"
421
+
422
def generate_response_with_agent(message, max_retries=3):
    """Ask the shared agent for a reply, retrying transient failures.

    Retries up to *max_retries* times with a 2-second pause between
    attempts; on final failure returns an apology string instead of raising.
    The reply is length-limited via smart_truncate().
    """
    for attempt in range(max_retries):
        try:
            # Tool-vs-no-tool routing happens inside the agent's chat().
            reply = get_agent().chat(message)
            return smart_truncate(reply)
        except Exception as e:
            logger.error(f"Agent error (attempt {attempt + 1}): {e}")
            if attempt < max_retries - 1:
                time.sleep(2)
                continue
            return f"I apologize, but I encountered an error while processing your message: {str(e)}"
442
+
443
def chat_response(message, history=None):
    """Handle one user message: log metrics, run the agent, log again, reply.

    Metrics logging is best-effort — failures are logged and never block the
    chat. The *history* parameter is accepted for interface compatibility
    but unused.
    """
    try:
        # Timing context shared with the metrics tracker across both logs.
        timing_context = {
            'start_time': time.time(),
            'chunk_count': 0,
            'provider_latency': 0.0,
        }

        try:
            # Record the start of the interaction (response still empty).
            metrics_tracker.log_interaction(
                query=message,
                response="",
                timing_context=timing_context,
                error_occurred=False,
            )
            logger.info("Metrics interaction logged successfully")
        except Exception as metrics_error:
            logger.error(f"Error in metrics_tracker.log_interaction: {metrics_error}")

        # Generate the actual reply via the retry-wrapped agent path.
        response = generate_response_with_agent(message)

        try:
            # Record the completed interaction with the final response.
            metrics_tracker.log_interaction(
                query=message,
                response=response,
                timing_context=timing_context,
                error_occurred=False,
            )
        except Exception as metrics_error:
            logger.error(f"Error in final metrics logging: {metrics_error}")

        return response

    except Exception as e:
        logger.error(f"Error in chat_response: {e}")
        return f"I apologize, but I encountered an error while processing your message: {str(e)}"
485
+
486
def respond_and_update(message, history):
    """Handle a user submission; yields (history, textbox_value) pairs.

    Gradio treats this generator as a streaming handler: the first yield
    echoes the user's turn and clears the textbox, the second adds the
    assistant's reply.
    """
    # Guard: ignore blank submissions. This must *yield* — the original
    # `return history, ""` inside a generator only sets StopIteration.value,
    # which Gradio never sees, so the textbox was not cleared.
    if not message.strip():
        yield history, ""
        return

    # Add user message to history and show it immediately.
    history.append({"role": "user", "content": message})
    yield history, ""

    # Generate response (blocking), then append the assistant turn.
    response = chat_response(message)

    history.append({"role": "assistant", "content": response})
    yield history, ""
500
+
501
def clear_chat():
    """Reset the visible chat and, when the agent exists, its window memory.

    Returns the empty chatbot history and textbox value Gradio expects.
    """
    global agent
    live_agent = agent
    if live_agent is not None:
        # Forget prior turns so the next conversation starts clean.
        live_agent.memory.clear()
    return [], ""
507
+
508
def warmup_agent():
    """Run one throwaway query through the agent so model weights, tokenizer
    caches, and code paths are hot before the first real user message."""
    logger.info("Warming up agent with test query...")
    try:
        warm = get_agent()

        # Exercise the full chat path once.
        reply = warm.chat("Hello, this is a warmup test.")
        logger.info(f"Agent warmup completed successfully! Test response length: {len(reply)} chars")

        # Drop the throwaway exchange from conversation memory.
        warm.memory.clear()

    except Exception as e:
        logger.error(f"Agent warmup failed: {e}")
523
+
524
# --- UI: Interface Creation ---
def create_interface():
    """Creates and configures the complete Gradio interface.

    Returns the gr.Blocks demo; styles.css is optional and loaded
    best-effort.
    """

    # Read CSS file (missing/unreadable file degrades to default styling).
    custom_css = ""
    try:
        with open("styles.css", "r", encoding="utf-8") as css_file:
            custom_css = css_file.read()
    except FileNotFoundError:
        logger.warning("styles.css file not found, using default styling")
    except Exception as e:
        logger.warning(f"Error reading styles.css: {e}")

    with gr.Blocks(
        title="Mimir",
        fill_width=True,
        fill_height=True,
        theme=gr.themes.Origin()
    ) as demo:
        # Head content, light-mode redirect, then the MathJax CDN script and
        # its config (order matters: config must exist before typesetting).
        gr.HTML(html_head_content)
        gr.HTML(force_light_mode)
        gr.HTML('<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>')
        gr.HTML(mathjax_config)

        with gr.Column(elem_classes=["main-container"]):
            # Title Section
            gr.HTML('<div class="title-header"><h1> Mimir 🎓</h1></div>')

            # Chat Section
            with gr.Row():
                chatbot = gr.Chatbot(
                    type="messages",
                    show_copy_button=True,
                    show_share_button=False,
                    avatar_images=None,
                    elem_id="main-chatbot",
                    container=False,
                    scale=1,
                    height="70vh"
                )

            # Input Section
            with gr.Row(elem_classes=["input-controls"]):
                msg = gr.Textbox(
                    placeholder="Ask me about math, research, study strategies, or any educational topic...",
                    show_label=False,
                    lines=6,
                    max_lines=8,
                    elem_classes=["input-textbox"],
                    container=False,
                    scale=4
                )
                with gr.Column(elem_classes=["button-column"], scale=1):
                    send = gr.Button("Send", elem_classes=["send-button"], size="sm")
                    clear = gr.Button("Clear", elem_classes=["clear-button"], size="sm")

        # Event handlers — respond_and_update is a generator, so Gradio
        # streams each yielded (history, textbox) pair to the UI.
        msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
        send.click(respond_and_update, [msg, chatbot], [chatbot, msg])
        clear.click(clear_chat, outputs=[chatbot, msg])

        # Apply CSS at the very end so it overrides component defaults.
        gr.HTML(f'<style>{custom_css}</style>')

    return demo
591
+
592
# --- Main Execution ---
if __name__ == "__main__":
    try:
        logger.info("=" * 50)
        logger.info("Starting Mimir Application")
        logger.info("=" * 50)

        # Step 1: Preload the model and agent. Assigning the module-level
        # `agent` here means get_agent() reuses this instance later.
        logger.info("Loading AI model...")
        start_time = time.time()
        agent = Educational_Agent()
        load_time = time.time() - start_time
        logger.info(f"Model loaded successfully in {load_time:.2f} seconds")

        # Step 2: Warm up the model with a throwaway query.
        logger.info("Warming up model...")
        warmup_agent()

        # Step 3: Build and serve the Gradio UI.
        interface = create_interface()
        interface.queue()
        interface.launch(
            server_name="0.0.0.0",
            share=True,
            debug=True,
            favicon_path="assets/favicon.ico"
        )

    except Exception as e:
        # Re-raise after logging so the process exits non-zero on failure.
        logger.error(f"❌ Failed to launch Mimir: {e}")
        raise