Update graph_tool.py
graph_tool.py  CHANGED  (+110 −630)
@@ -1,646 +1,126 @@
-import os
-from metrics import MimirMetrics
-
-import gradio as gr
-
-os.environ['HF_HOME'] = '/tmp/huggingface'
-os.environ['TRANSFORMERS_CACHE'] = '/tmp/huggingface'
-os.environ['HF_DATASETS_CACHE'] = '/tmp/huggingface'
-
-import time
-from dotenv import load_dotenv
-import logging
-import re
-from langchain_core.tools import tool
-from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
-from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.runnables import RunnableBranch
-from langgraph.prebuilt import create_react_agent
-from langchain.memory import ConversationBufferWindowMemory
-from typing import Optional, List, Any, Type
-from pydantic import BaseModel, Field
-from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-import torch
-
-load_dotenv(".env")
-HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
-print("Environment variables loaded.")
-
-# --- Environment and Logging Setup ---
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Support both token names for flexibility
-hf_token = HF_TOKEN
-if not hf_token:
-    logger.warning("Neither HF_TOKEN nor HUGGINGFACEHUB_API_TOKEN is set; the application may not work.")
-
-metrics_tracker = MimirMetrics(save_file="Mimir_metrics.json")
-
 import json
 
-@tool
-def Create_Graph_Tool(graph_config: str) -> str:
     """
-    Create an educational graph from a JSON configuration string.
-
-    The config uses the same schema as generate_plot (data, labels,
-    plot_type, title, x_label, y_label) plus an optional
-    "educational_context" field, e.g.:
-
-        {
-            ...
-            "x_label": "Subjects",
-            "y_label": "Average Score",
-            "educational_context": "This visualization helps students see performance patterns across subjects"
-        }
-
-    Plot types:
-    - "bar": Best for comparing categories, showing distributions, or discrete data
-    - "line": Best for showing trends over time or continuous relationships
-    - "pie": Best for showing parts of a whole or proportions
-
-    Always create meaningful educational data that illustrates the concept you're teaching.
-    Include educational_context to explain why the visualization helps learning.
     """
     try:
-        # Parse the JSON configuration
-        config = json.loads(graph_config)
-
-        # Extract educational context if provided
-        educational_context = config.get("educational_context", "")
-
-        # Call your generate_plot function
-        graph_html = generate_plot(graph_config)
-
-        # Prepend educational context if provided
-        if educational_context:
-            context_html = f'<div style="margin: 10px 0; padding: 10px; background: #f8f9fa; border-left: 4px solid #007bff; font-style: italic;">💡 {educational_context}</div>'
-            return context_html + graph_html
-
-        return graph_html
-
     except json.JSONDecodeError as e:
-        logger.error(f"Invalid JSON in graph config: {e}")
-        return '<p style="color:red;">Graph generation failed - invalid JSON format</p>'
-    except Exception as e:
-        logger.error(f"Error in graph generation: {e}")
-        return f'<p style="color:red;">Error creating graph: {str(e)}</p>'
-
-# --- Tool Decision Engine ---
-class Tool_Decision_Engine:
-    """Uses LLM to intelligently decide when visualization tools would be beneficial"""
-
-    def __init__(self, llm):
-        self.decision_llm = llm
-        self.decision_prompt = """Analyze this educational query and determine if creating a graph, chart, or visual representation would significantly enhance learning and understanding.
-
-Query: "{query}"
-
-Consider these factors:
-1. Would visualization make a concept clearer or easier to understand?
-2. Does the topic involve data, relationships, comparisons, or trends?
-3. Could a graph help illustrate abstract concepts concretely?
-4. For practice questions, would including visual elements be educational?
-
-Examples that BENEFIT from visualization:
-- Explaining mathematical functions or statistical concepts
-- Creating practice questions that involve data interpretation
-- Teaching about scientific trends or relationships
-- Comparing quantities, performance, or outcomes
-- Illustrating economic principles or business metrics
-
-Examples that do NOT need visualization:
-- Simple definitions or explanations
-- General conversation or greetings
-- Text-based study strategies
-- Qualitative discussions without data
-
-Answer with exactly: YES or NO
-
-Decision:"""
-
-    def should_use_visualization(self, query: str) -> bool:
-        """Use LLM reasoning to determine if visualization would be beneficial"""
-        try:
-            # Create decision prompt
-            decision_query = self.decision_prompt.format(query=query)
-
-            # Get LLM decision
-            decision_response = self.decision_llm.invoke(decision_query)
-
-            # Parse response - look for YES/NO
-            decision_text = decision_response.strip().upper()
-
-            # Log the decision for debugging
-            logger.info(f"Tool decision for '{query[:50]}...': {decision_text}")
-
-            return "YES" in decision_text and "NO" not in decision_text
-
-        except Exception as e:
-            logger.error(f"Error in tool decision making: {e}")
-            # Default to no tools if decision fails
-            return False
-
-# --- System Prompt ---
-SYSTEM_PROMPT = """You are Mimir, an expert multi-concept tutor designed to facilitate genuine learning and understanding. Your primary mission is to guide students through the learning process rather than providing direct answers to academic work.
-
-## Core Educational Principles
-- Provide comprehensive, educational responses that help students truly understand concepts
-- Use minimal formatting, with markdown bolding reserved for **key terms** only
-- Prioritize teaching methodology over answer delivery
-- Foster critical thinking and independent problem-solving skills
-
-## Tone and Communication Style
-- Maintain an engaging, friendly tone appropriate for high school students
-- Write at a reading level that is accessible yet intellectually stimulating
-- Be supportive and encouraging without being condescending
-- Never use crude language or content inappropriate for an educational setting
-- Avoid preachy, judgmental, or accusatory language
-- Skip flattery and respond directly to questions
-- Do not use emojis or actions in asterisks unless specifically requested
-- Present critiques and corrections kindly as educational opportunities
-
-## Academic Integrity Approach
-You recognize that students may seek direct answers to homework, assignments, or test questions. Rather than providing complete solutions or making accusations about intent, you should:
-
-- **Guide through processes**: Break down problems into conceptual components and teach underlying principles
-- **Ask clarifying questions**: Understand what the student already knows and where their confusion lies
-- **Provide similar examples**: Work through analogous problems that demonstrate the same concepts without directly solving their specific assignment
-- **Encourage original thinking**: Help students develop their own reasoning and analytical skills
-- **Suggest study strategies**: Recommend effective learning approaches for the subject matter
-
-## Visual Learning Enhancement
-You have the ability to create graphs and charts to enhance your explanations. Use this capability proactively when:
-- Explaining mathematical concepts (functions, distributions, relationships)
-- Teaching statistical analysis or data interpretation
-- Discussing scientific trends, patterns, or experimental results
-- Comparing different options, outcomes, or scenarios
-- Illustrating economic principles, business metrics, or financial concepts
-- Showing survey results, demographic data, or research findings
-- Demonstrating any concept where visualization aids comprehension
-
-**Important**: Only use the graph tool when visualization would genuinely help explain a concept. For general conversation, explanations, or questions that don't involve data or relationships, respond normally without tools.
-
-## Response Guidelines
-- **For math problems**: Explain concepts, provide formula derivations, and guide through problem-solving steps without computing final numerical answers
-- **For multiple-choice questions**: Discuss the concepts being tested and help students understand how to analyze options rather than identifying the correct choice
-- **For essays or written work**: Discuss research strategies, organizational techniques, and critical thinking approaches rather than providing content or thesis statements
-- **For factual questions**: Provide educational context and encourage students to synthesize information rather than stating direct answers
-
-## Communication Guidelines
-- Maintain a supportive, non-judgmental tone in all interactions
-- Assume positive intent while redirecting toward genuine learning
-- Use Socratic questioning to promote discovery and critical thinking
-- Celebrate understanding and progress in the learning process
-- Encourage students to explain their thinking and reasoning
-- Provide honest, accurate feedback even when it may not be what the student wants to hear
-
-Your goal is to be an educational partner who empowers students to succeed through understanding, not a service that completes their work for them."""
-
-# --- LLM Class Unchanged ---
-logger = logging.getLogger(__name__)
-
-class Qwen25SmallLLM:
-    def __init__(self, model_path: str = "Qwen/Qwen2.5-3B-Instruct", use_4bit: bool = True):
-        logger.info(f"Loading model: {model_path} (use_4bit={use_4bit})")
-
-        try:
-            # Load tokenizer
-            self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-
-            if use_4bit:
-                quant_config = BitsAndBytesConfig(
-                    load_in_4bit=True,
-                    bnb_4bit_compute_dtype=torch.bfloat16,
-                    bnb_4bit_use_double_quant=True,
-                    bnb_4bit_quant_type="nf4"
-                )
-                logger.info("Using 4-bit quantization with BitsAndBytes")
-            else:
-                quant_config = BitsAndBytesConfig(
-                    load_in_8bit=True,
-                    llm_int8_enable_fp32_cpu_offload=True
-                )
-                logger.info("Using 8-bit quantization with BitsAndBytes")
-
-            # Try quantized load
-            self.model = AutoModelForCausalLM.from_pretrained(
-                model_path,
-                quantization_config=quant_config,
-                device_map="auto",
-                torch_dtype=torch.bfloat16,
-                trust_remote_code=True,
-                low_cpu_mem_usage=True
-            )
-
-        except Exception as e:
-            logger.warning(f"Quantized load failed, falling back: {e}")
-            self._load_fallback_model(model_path)
-
-        # Ensure pad token
-        if self.tokenizer.pad_token is None:
-            self.tokenizer.pad_token = self.tokenizer.eos_token
-
-    def _load_fallback_model(self, model_path: str):
-        """Fallback if quantization fails."""
-        self.model = AutoModelForCausalLM.from_pretrained(
-            model_path,
-            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-            device_map="auto" if torch.cuda.is_available() else None,
-            trust_remote_code=True,
-            low_cpu_mem_usage=True
-        )
-
-    def invoke(self, prompt: str, stop: Optional[List[str]] = None) -> str:
-        try:
-            messages = [
-                {"role": "system", "content": SYSTEM_PROMPT},
-                {"role": "user", "content": prompt}
-            ]
-            text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-
-            inputs = self.tokenizer([text], return_tensors="pt", padding=True, truncation=True, max_length=2048)
-            if torch.cuda.is_available():
-                inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
-
-            with torch.no_grad():
-                outputs = self.model.generate(
-                    **inputs,
-                    max_new_tokens=800,
-                    do_sample=True,
-                    temperature=0.7,
-                    top_p=0.9,
-                    top_k=50,
-                    repetition_penalty=1.1,
-                    pad_token_id=self.tokenizer.eos_token_id
-                )
-
-            # Use key access: after the device-move comprehension, inputs is a plain dict
-            new_tokens = [out[len(inp):] for inp, out in zip(inputs["input_ids"], outputs)]
-            return self.tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()
-
-        except Exception as e:
-            logger.error(f"Generation error: {e}")
-            return f"[Error generating response: {str(e)}]"
-
-# --- Modern Agent Implementation ---
-class Educational_Agent:
-    """Modern LangChain agent with LLM-based tool decision making"""
-
-    def __init__(self):
-        self.llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-3B-Instruct")
-        self.tool_decision_engine = Tool_Decision_Engine(self.llm)
-        self.memory = ConversationBufferWindowMemory(
-            memory_key="chat_history",
-            return_messages=True,
-            k=5
-        )
-
-    def should_use_tools(self, query: str) -> bool:
-        """Use LLM reasoning to determine if tools are needed"""
-        return self.tool_decision_engine.should_use_visualization(query)
 
-    def create_prompt_template(self, has_tools: bool):
-        """Create prompt template based on whether tools are available"""
-        if has_tools:
-            system_content = SYSTEM_PROMPT + "\n\nYou have access to graph creation tools. Use them when visualization would help explain concepts."
-        else:
-            system_content = SYSTEM_PROMPT + "\n\nRespond using your knowledge without any tools."
-
-        return ChatPromptTemplate.from_messages([
-            ("system", system_content),
-            ("human", "{input}")
-        ])
-
-    def process_with_tools(self, query: str) -> str:
-        """Process query with tools available"""
-        try:
-            # Create agent with tools
-            tools = [Create_Graph_Tool]
-
-            # Use create_react_agent for better control
-            agent = create_react_agent(
-                self.llm,
-                tools,
-                state_modifier=self.create_prompt_template(has_tools=True)
-            )
-
-            response = agent.invoke({"messages": [HumanMessage(content=query)]})
-
-            # Extract the final message content
-            if response and "messages" in response:
-                final_message = response["messages"][-1]
-                if hasattr(final_message, 'content'):
-                    return final_message.content
-                else:
-                    return str(final_message)
-
-            return str(response)
-
-        except Exception as e:
-            logger.error(f"Error in tool processing: {e}")
-            return f"I apologize, but I encountered an error while processing your request: {str(e)}"
-
-    def process_without_tools(self, query: str) -> str:
-        """Process query without tools"""
-        try:
-            response = self.llm.invoke(query)
-            return response
-        except Exception as e:
-            logger.error(f"Error in normal processing: {e}")
-            return f"I apologize, but I encountered an error: {str(e)}"
-
-    def chat(self, message: str) -> str:
-        """Main chat interface with conditional tool usage"""
-        try:
-            # Determine if tools are needed
-            if self.should_use_tools(message):
-                logger.info("Query requires visualization - enabling tools")
-                return self.process_with_tools(message)
-            else:
-                logger.info("Query doesn't need tools - responding normally")
-                return self.process_without_tools(message)
-
-        except Exception as e:
-            logger.error(f"Error in chat processing: {e}")
-            return f"I apologize, but I encountered an error: {str(e)}"
-
-# --- Global Agent Instance ---
-agent = None
-
-def get_agent():
-    """Get or create the educational agent."""
-    global agent
-    if agent is None:
-        agent = Educational_Agent()
-    return agent
-
-# --- UI: MathJax Configuration ---
-mathjax_config = '''
-<script>
-window.MathJax = {
-    tex: {
-        inlineMath: [['\\\\(', '\\\\)']],
-        displayMath: [['$', '$'], ['\\\\[', '\\\\]']],
-        packages: {'[+]': ['ams']}
-    },
-    svg: {fontCache: 'global'},
-    startup: {
-        ready: () => {
-            MathJax.startup.defaultReady();
-            // Re-render math when new content is added
-            const observer = new MutationObserver(function(mutations) {
-                MathJax.typesetPromise();
-            });
-            observer.observe(document.body, {childList: true, subtree: true});
-        }
-    }
-};
-</script>
-'''
-
-# --- HTML Head Content ---
-html_head_content = '''
-<meta charset="utf-8">
-<meta name="viewport" content="width=device-width, initial-scale=1">
-<title>Mimir - AI Educational Assistant</title>
-'''
-
-# --- Force Light Mode Script ---
-force_light_mode = '''
-<script>
-// Force light theme in Gradio
-window.addEventListener('DOMContentLoaded', function () {
-    const gradioURL = window.location.href;
-    const url = new URL(gradioURL);
-    const currentTheme = url.searchParams.get('__theme');
-
-    if (currentTheme !== 'light') {
-        url.searchParams.set('__theme', 'light');
-        window.location.replace(url.toString());
-    }
-});
-</script>
-'''
-
-# --- Core Logic Functions ---
-def smart_truncate(text, max_length=3000):
-    """Truncates text intelligently to the last full sentence or word."""
-    if len(text) <= max_length:
-        return text
-
-    # Try to split by sentence
-    sentences = re.split(r'(?<=[.!?])\s+', text[:max_length])
-    if len(sentences) > 1:
-        return ' '.join(sentences[:-1]) + "... [Response truncated - ask for continuation]"
-    # Otherwise, split by word
-    words = text[:max_length].split()
-    return ' '.join(words[:-1]) + "... [Response truncated]"
-
-def generate_response_with_agent(message, max_retries=3):
-    """Generate response using modern agent with proper tool control."""
-
-    for attempt in range(max_retries):
-        try:
-            # Get the agent
-            current_agent = get_agent()
-
-            # Use the agent's chat method with conditional tool usage
-            response = current_agent.chat(message)
-
-            return smart_truncate(response)
-
-        except Exception as e:
-            logger.error(f"Agent error (attempt {attempt + 1}): {e}")
-            if attempt < max_retries - 1:
-                time.sleep(2)
-                continue
-            else:
-                return f"I apologize, but I encountered an error while processing your message: {str(e)}"
-
-def chat_response(message, history=None):
-    """Process chat message and return response."""
-    try:
-        # Track metrics with timing context
-        start_time = time.time()
-        timing_context = {
-            'start_time': start_time,
-            'chunk_count': 0,
-            'provider_latency': 0.0
-        }
-
-        try:
-            # Log start of interaction
-            metrics_tracker.log_interaction(
-                query=message,
-                response="",
-                timing_context=timing_context,
-                error_occurred=False
-            )
-            logger.info("Metrics interaction logged successfully")
-        except Exception as metrics_error:
-            logger.error(f"Error in metrics_tracker.log_interaction: {metrics_error}")
-
-        # Generate response with modern agent
-        response = generate_response_with_agent(message)
-
-        # Log final metrics
-        try:
-            metrics_tracker.log_interaction(
-                query=message,
-                response=response,
-                timing_context=timing_context,
-                error_occurred=False
-            )
-        except Exception as metrics_error:
-            logger.error(f"Error in final metrics logging: {metrics_error}")
-
-        return response
-
-    except Exception as e:
-        logger.error(f"Error in chat_response: {e}")
-        return f"I apologize, but I encountered an error while processing your message: {str(e)}"
-
-def respond_and_update(message, history):
-    """Main generator to handle user submission."""
-    if not message.strip():
-        # This is a generator, so yield rather than return a value
-        yield history, ""
-        return
-
-    # Add user message to history
-    history.append({"role": "user", "content": message})
-    yield history, ""
-
-    # Generate response
-    response = chat_response(message)
-
-    history.append({"role": "assistant", "content": response})
-    yield history, ""
-
-def clear_chat():
-    """Clear the chat history."""
-    global agent
-    if agent is not None:
-        agent.memory.clear()
-    return [], ""
-
-def warmup_agent():
-    """Warm up the agent with a test query to preload everything."""
-    logger.info("Warming up agent with test query...")
     try:
-        current_agent = get_agent()
-
-        # Run one short test query end-to-end to warm the model
-        test_response = current_agent.chat("Hello")
-        logger.info("Warmup complete")
-
-        # Clear any test data from memory
-        current_agent.memory.clear()
-
     except Exception as e:
-        logger.warning(f"Warmup failed: {e}")
 
-# --- Gradio Interface ---
-def create_interface():
-    """Build and return the Gradio Blocks UI."""
 
-    # Load custom CSS if available
-    custom_css = ""
-    try:
-        with open("styles.css", "r", encoding="utf-8") as css_file:
-            custom_css = css_file.read()
-    except FileNotFoundError:
-        logger.warning("styles.css file not found, using default styling")
-    except Exception as e:
-        logger.warning(f"Error reading styles.css: {e}")
 
-    with gr.Blocks(
-        title="Mimir",
-        fill_width=True,
-        fill_height=True,
-        theme=gr.themes.Origin()
-    ) as demo:
-        # Add head content and MathJax
-        gr.HTML(html_head_content)
-        gr.HTML(force_light_mode)
-        gr.HTML('<script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>')
-        gr.HTML(mathjax_config)
-
-        with gr.Column(elem_classes=["main-container"]):
-            # Title Section
-            gr.HTML('<div class="title-header"><h1> Mimir 🎓</h1></div>')
-
-            # Chat Section
-            with gr.Row():
-                chatbot = gr.Chatbot(
-                    type="messages",
-                    show_copy_button=True,
-                    show_share_button=False,
-                    avatar_images=None,
-                    elem_id="main-chatbot",
-                    container=False,
-                    scale=1,
-                    height="70vh"
-                )
-
-            # Input Section
-            with gr.Row(elem_classes=["input-controls"]):
-                msg = gr.Textbox(
-                    placeholder="Ask me about math, research, study strategies, or any educational topic...",
-                    show_label=False,
-                    lines=6,
-                    max_lines=8,
-                    elem_classes=["input-textbox"],
-                    container=False,
-                    scale=4
-                )
-                with gr.Column(elem_classes=["button-column"], scale=1):
-                    send = gr.Button("Send", elem_classes=["send-button"], size="sm")
-                    clear = gr.Button("Clear", elem_classes=["clear-button"], size="sm")
-
-        # Event handlers
-        msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
-        send.click(respond_and_update, [msg, chatbot], [chatbot, msg])
-        clear.click(clear_chat, outputs=[chatbot, msg])
-
-        # Apply CSS at the very end
-        gr.HTML(f'<style>{custom_css}</style>')
-
-    return demo
-
-# --- Main Execution ---
-if __name__ == "__main__":
     try:
-
-        logger.info("Starting Mimir Application")
-        logger.info("=" * 50)
-
-        # Step 1: Preload the model and agent
-        logger.info("Loading AI model...")
-        start_time = time.time()
-        agent = Educational_Agent()
-        load_time = time.time() - start_time
-        logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
-
-        # Step 2: Warm up the model
-        logger.info("Warming up model...")
-        warmup_agent()
 
-        # Step 3: Build and launch the interface
-        demo = create_interface()
-        demo.launch(
-            server_name="0.0.0.0",
-            share=True,
-            debug=True,
-            favicon_path="assets/favicon.ico"
-        )
 
     except Exception as e:
-        logger.error(f"Failed to start application: {e}")
-        raise
+import base64
+from io import BytesIO
+import matplotlib.pyplot as plt
+import numpy as np
 import json
 
+def generate_plot(plot_config_json):
     """
+    Generates a plot (bar, line, or pie) and returns it as an HTML-formatted Base64-encoded image string.
+
+    Args:
+        plot_config_json (str): JSON string containing all plot configuration:
+        {
+            "data": {"key1": value1, "key2": value2, ...},
+            "labels": ["label1", "label2", ...],
+            "plot_type": "bar|line|pie",
+            "title": "Plot Title",
+            "x_label": "X Axis Label" (optional),
+            "y_label": "Y Axis Label" (optional)
+        }
+
+    Returns:
+        str: An HTML img tag with Base64-encoded plot image.
     """
     try:
+        # Parse the main JSON configuration
+        config = json.loads(plot_config_json)
     except json.JSONDecodeError as e:
+        return f'<p style="color:red;">Error parsing JSON configuration: {e}</p>'
 
+    # Extract parameters with defaults
     try:
+        data = config.get("data", {})
+        labels = config.get("labels", [])
+        plot_type = config.get("plot_type", "bar")
+        title = config.get("title", "Untitled Plot")
+        x_label = config.get("x_label", "")
+        y_label = config.get("y_label", "")
     except Exception as e:
+        return f'<p style="color:red;">Error extracting configuration parameters: {e}</p>'
 
+    # Validate inputs
+    if not isinstance(data, dict):
+        return '<p style="color:red;">Data must be a dictionary with keys as labels and values as numbers.</p>'
 
+    if not isinstance(labels, list):
+        return '<p style="color:red;">Labels must be a list.</p>'
 
     try:
+        fig, ax = plt.subplots(figsize=(10, 6))
 
+        # Extract keys and values from the data dictionary
+        x_data = list(data.keys())
+        y_data = list(data.values())
 
+        # Ensure y_data contains numeric values
+        try:
+            y_data = [float(val) for val in y_data]
+        except (ValueError, TypeError):
+            return '<p style="color:red;">All data values must be numeric.</p>'
+
+        if plot_type == 'bar':
+            bars = ax.bar(x_data, y_data)
+            ax.set_xlabel(x_label)
+            ax.set_ylabel(y_label)
+
+            # Add value labels on top of bars
+            for bar, value in zip(bars, y_data):
+                height = bar.get_height()
+                ax.text(bar.get_x() + bar.get_width()/2., height,
+                        f'{value}', ha='center', va='bottom')
+
+        elif plot_type == 'line':
+            ax.plot(x_data, y_data, marker='o', linewidth=2, markersize=6)
+            ax.set_xlabel(x_label)
+            ax.set_ylabel(y_label)
+            ax.grid(True, alpha=0.3)
+
+        elif plot_type == 'pie':
+            # For pie charts, use labels parameter if provided, otherwise use data keys
+            pie_labels = labels if len(labels) == len(y_data) else x_data
+            wedges, texts, autotexts = ax.pie(y_data, labels=pie_labels, autopct='%1.1f%%',
+                                              startangle=90, textprops={'fontsize': 10})
+            ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle
+            # Don't set x/y labels for pie charts as they don't make sense
+
+        else:
+            return f'<p style="color:red;">Invalid plot_type: {plot_type}. Choose "bar", "line", or "pie".</p>'
+
+        ax.set_title(title, fontsize=14, fontweight='bold', pad=20)
+
+        # Improve layout
+        plt.tight_layout()
+
+        # Save plot to a BytesIO buffer in memory
+        buf = BytesIO()
+        plt.savefig(buf, format='png', bbox_inches='tight', dpi=150,
+                    facecolor='white', edgecolor='none')
+        plt.close(fig)  # Close the plot to free up memory
+
+        # Encode the image data to a Base64 string
+        img_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
+
+        # Return HTML img tag with proper styling
+        return f'''
+        <div style="text-align: center; margin: 20px 0;">
+            <img src="data:image/png;base64,{img_base64}"
+                 style="max-width: 100%; height: auto; border-radius: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.1);"
+                 alt="{title}" />
+        </div>
+        '''
+
     except Exception as e:
+        plt.close('all')  # Clean up any open figures
+        return f'<p style="color:red;">Error generating plot: {str(e)}</p>'
+
+# Example usage:
+# plot_config = {
+#     "data": {"A": 10, "B": 20, "C": 15},
+#     "labels": ["Category A", "Category B", "Category C"],
+#     "plot_type": "bar",
+#     "title": "Sample Bar Chart",
+#     "x_label": "Categories",
+#     "y_label": "Values"
+# }
+# result = generate_plot(json.dumps(plot_config))
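
Since this change deletes the Create_Graph_Tool wrapper along with the rest of the agent, any caller that still wants a LangChain tool has to re-wrap generate_plot itself. A minimal sketch, assuming graph_tool.py is importable as graph_tool and using the @tool decorator from langchain_core.tools (which the removed code already imported); the wrapper name create_graph and the smoke test are illustrative, not part of this diff:

import json

from langchain_core.tools import tool

from graph_tool import generate_plot  # hypothetical import path for this file


@tool
def create_graph(graph_config: str) -> str:
    """Render a bar, line, or pie chart from a JSON config string.

    Uses the same schema as generate_plot: data, labels, plot_type,
    title, x_label, y_label.
    """
    return generate_plot(graph_config)


# Smoke test: render one chart of each supported type.
if __name__ == "__main__":
    for plot_type in ("bar", "line", "pie"):
        config = {
            "data": {"A": 10, "B": 20, "C": 15},
            "labels": ["Category A", "Category B", "Category C"],
            "plot_type": plot_type,
            "title": f"Sample {plot_type} chart",
            "x_label": "Categories",
            "y_label": "Values",
        }
        html = generate_plot(json.dumps(config))
        # Success yields a <div> with an embedded Base64 image; errors yield a red <p>.
        print(plot_type, "ok" if html.lstrip().startswith("<div") else html)

The smoke test mirrors the commented example at the bottom of the new file: on success generate_plot returns a <div> wrapping a Base64 <img>, and on any failure it returns a red <p> describing the error, so callers can branch on the prefix.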