# Plantoi / app.py
import os
import pickle
import json
import re
import numpy as np
from bytez import Bytez  # Bytez cloud inference SDK (used for the chat LLM)
from flask import Flask, request, jsonify, render_template, session, Response, stream_with_context
from PIL import Image
from disease_data import DISEASE_INFO
from showcase_data import SHOWCASE_DATA
import io
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from werkzeug.utils import secure_filename
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns
import base64
from io import BytesIO
import cv2
import threading
# Read the Bytez API key from the environment (e.g. an HF Spaces secret) rather than
# hard-coding it in the source. BYTEZ_API_KEY is an assumed variable name.
sdk = Bytez(os.environ.get('BYTEZ_API_KEY', ''))
# --- Matplotlib Configuration ---
plt.switch_backend('Agg')
# --- Configuration ---
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# --- Flask App Initialization ---
app = Flask(__name__)
# Prefer an environment-provided secret key (FLASK_SECRET_KEY is an assumed variable
# name); SameSite=None + Secure lets the session cookie work inside the HF Spaces iframe.
app.secret_key = os.environ.get('FLASK_SECRET_KEY', 'plantoi_secret')
app.config['SESSION_COOKIE_SAMESITE'] = 'None'
app.config['SESSION_COOKIE_SECURE'] = True
app.config['UPLOAD_FOLDER'] = 'static/uploads'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
# --- AI Model Loading ---
model = None
class_indices = None
class_names = None
def load_ai_models():
"""Load Keras model and class indices in a background thread."""
global model, class_indices, class_names
try:
print("Loading Keras model in background thread...")
model = load_model('model/plant_disease_model_final.h5')
with open('data/class_indices.pkl', 'rb') as f:
class_indices = pickle.load(f)
class_names = {v: k for k, v in class_indices.items()}
print("✅ Keras model, class indices, and disease info loaded successfully.")
except Exception as e:
model = None
class_indices = None
class_names = None
print(f"❌ Error loading Keras model or class indices: {e}")
# Start background loading so the Flask server can start immediately
loader_thread = threading.Thread(target=load_ai_models, daemon=True)
loader_thread.start()
# --- Helper Functions ---
def preprocess_image(image_bytes):
    """Decode image bytes into a (1, 224, 224, 3) float array scaled to [0, 1]."""
    try:
        img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        img = img.resize((224, 224))
img_array = img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array /= 255.0
return img_array
except Exception as e:
print(f"Error in preprocessing image: {e}")
return None
def analyze_image_with_moondream(image_bytes):
    """
    Placeholder for image analysis via a multimodal model on the Bytez Cloud API.
    Multimodal support is not yet integrated with the native Bytez SDK, so this
    currently returns a fixed message rather than a real analysis.
    """
    return "Image analysis using the native Bytez SDK is not yet supported. Please contact support for multimodal model integration."
# --- Flask Routes ---
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/classes')
def get_model_classes():
if class_names:
# Use the keys from DISEASE_INFO as the source of truth for display and IDs
# This ensures consistency with what the user has defined as the model's output
sorted_classes = sorted(list(DISEASE_INFO.keys()))
return jsonify(sorted_classes)
return jsonify({"error": "Class names not available or model not loaded."}), 500
@app.route('/api/showcase/<path:class_id>')
def get_showcase_data(class_id):
"""Returns the showcase data for a specific class."""
print(f"Requested Class ID: {class_id}") # Debug print statement as requested
data = SHOWCASE_DATA.get(class_id)
if data:
return jsonify(data)
print(f"DEBUG: Failed to find '{class_id}' in SHOWCASE_DATA.")
return jsonify({"error": f"Class ID '{class_id}' not found in showcase data."}), 404
@app.route('/diagnose', methods=['POST'])
def diagnose():
if 'file' not in request.files:
return jsonify({"error": "No file part in the request. The key should be 'file'."}), 400
file = request.files['file']
if file.filename == '':
return jsonify({"error": "No selected file."}), 400
    # Bail out early if the background model load has not finished (or failed)
    if model is None or class_names is None:
        return jsonify({"error": "AI models are not loaded yet. Please retry shortly or check server logs."}), 500
    filename = secure_filename(file.filename)
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(filepath)
    # Store the absolute path in the session for the /visualize route
    session['current_img_path'] = os.path.abspath(filepath)
    file.seek(0)
    image_bytes = file.read()
processed_image = preprocess_image(image_bytes)
if processed_image is None:
return jsonify({"error": "Failed to preprocess image."}), 400
# Step A: Get Prediction & Confidence
prediction = model.predict(processed_image)
# Store prediction data in session
session['last_prediction'] = prediction[0].tolist()
session['last_confidence'] = float(np.max(prediction))
confidence = np.max(prediction)
predicted_class_index = np.argmax(prediction)
predicted_class_name = class_names.get(predicted_class_index, "Unknown")
# Add debugging print statement
print(f"DEBUG: Model predicted '{predicted_class_name}' (Confidence: {confidence:.2f})")
# Step B (Fast Lane)
report_data = DISEASE_INFO.get(predicted_class_name)
if confidence > 0.55 and report_data:
print(f"Fast Lane: Confidence > 55% ({confidence:.2f}). Found '{predicted_class_name}' in DB.")
report = {
"disease_name": predicted_class_name.replace('___', ' ').replace('_', ' '),
"health_status": f"{report_data['status']}. {report_data['description']}",
"detailed_symptoms": "\n".join([f"- {s}" for s in report_data['symptoms']]),
"prevention_methods": "\n".join([f"- {p}" for p in report_data['prevention']]),
"smart_analysis": "Diagnosis from internal knowledge base. High confidence.",
"confidence": float(confidence),
"source_model": "Keras + DB"
}
session['last_diagnosis'] = report
return jsonify(report)
# Step C (Unknown Lane - Simplified)
print(f"Unknown Lane: Confidence <= 55% or class not in DB. Using Moondream for text analysis.")
ai_text_response = analyze_image_with_moondream(image_bytes)
report = {
"disease_name": "AI Visual Analysis",
"health_status": "See analysis below.",
"detailed_symptoms": ai_text_response, # The entire AI response is placed here
"prevention_methods": "Please refer to the analysis above.",
"smart_analysis": "Analysis provided by Moondream AI. This is a general analysis and may not be as specific as a diagnosis for a known plant type.",
"confidence": float(confidence), # Still show Keras confidence for context
"source_model": 'Moondream AI'
}
session['last_diagnosis'] = report
return jsonify(report)
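
# Illustrative client sketch (not used by the app): posts a leaf image to /diagnose and
# prints the JSON report. A requests.Session is used so the Flask session cookie set here
# can be reused by the /chat and /visualize routes. The base URL and image path are
# example assumptions.
def _example_diagnose(image_path, base_url="http://localhost:7860"):
    import requests  # client-side dependency, not required by the server itself
    http = requests.Session()
    with open(image_path, "rb") as fh:
        resp = http.post(f"{base_url}/diagnose", files={"file": fh})
    resp.raise_for_status()
    print(resp.json())
    return http  # reuse this session for follow-up /chat or /visualize calls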
@app.route('/chat', methods=['POST'])
def chat():
user_message = request.json.get('message')
if not user_message:
return jsonify({'error': 'No message provided'}), 400
# Initialize chat history in session if it doesn't exist
if 'chat_history' not in session:
session['chat_history'] = []
# Retrieve the last diagnosis context from the session
last_diagnosis = session.get('last_diagnosis', {})
diagnosis_context = ""
if last_diagnosis:
diagnosis_context = f"""
Here is the last plant diagnosis result:
Disease Name: {last_diagnosis.get('disease_name', 'N/A')}
Health Status: {last_diagnosis.get('health_status', 'N/A')}
Detailed Symptoms: {last_diagnosis.get('detailed_symptoms', 'N/A')}
Prevention Methods: {last_diagnosis.get('prevention_methods', 'N/A')}
Smart Analysis: {last_diagnosis.get('smart_analysis', 'N/A')}
        Confidence: {last_diagnosis.get('confidence', 0) * 100:.2f}%
Source Model: {last_diagnosis.get('source_model', 'N/A')}
"""
# Construct a system message for the AI
system_message_content = f"""You are a professional Botanical Expert.
Rule 1: If the user asks a simple question (e.g., 'Hi', 'How are you?'), reply with a warm, concise 1-sentence response.
Rule 2: If the user asks a technical or complex question, provide a structured response with Headings (##), Bold text for key terms, and Bullet points for steps.
Rule 3: Never be too long or too short; maintain a balanced, helpful tone.
{diagnosis_context}
Please answer the user's question. If the question is related to the diagnosis, use the provided context.
If the context is not sufficient, mention that.
"""
    # Append the user's message to the chat history. Mutating a list stored in the
    # session does not mark it as modified, so flag it explicitly.
    session['chat_history'].append({'role': 'user', 'content': user_message})
    session.modified = True
def generate_bytez_responses():
full_ai_response_content = ""
model_id = 'avans06/Meta-Llama-3.2-8B-Instruct' # New Bytez model ID
print(f"Using Model: {model_id}") # Log model ID
try:
            # Build the message list for the Bytez SDK: the system prompt (rules +
            # diagnosis context) first, then the last 10 chat messages. The current
            # user message was already appended to the history above.
            messages_for_sdk = [{"role": "system", "content": system_message_content}]
            for msg in session['chat_history'][-10:]:
                if msg['role'] in ['user', 'assistant']:
                    messages_for_sdk.append({"role": msg['role'], "content": msg['content']})
llm_model = sdk.model(model_id)
result = llm_model.run(messages_for_sdk) # Pass structured messages directly
if result.error: # Check for error in result
print(f'Bytez SDK Error: {result.error}')
error_msg = f'AI Error: {str(result.error)}' # Capture error as a string
yield error_msg.encode('utf-8') # Yield specific error message
return # Stop generation on error
if isinstance(result.output, dict):
content = result.output.get('content', '')
else:
content = str(result.output) # Extract the string content
if content is not None: # Safety check before encoding
full_ai_response_content = content
yield content.encode('utf-8') # Encode for Flask streaming
else:
yield "Error: Bytez SDK returned no output.".encode('utf-8')
except Exception as e:
app.logger.error(f"Bytez SDK error in chat: {e}", exc_info=True)
yield f"Error: An unexpected error occurred with the Bytez SDK. Details: {e}".encode('utf-8') # Encode error messages too
        finally:
            if full_ai_response_content:
                # NOTE: this runs inside the streaming generator, after the response
                # headers (and session cookie) have typically been sent, so with
                # cookie-based sessions this update may not persist between requests.
                session['chat_history'].append({'role': 'assistant', 'content': full_ai_response_content})
                session['chat_history'] = session['chat_history'][-10:]
return Response(stream_with_context(generate_bytez_responses()), mimetype='text/plain')
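
# Illustrative client sketch (not used by the app): consumes the streamed /chat response
# chunk by chunk. Pass the requests.Session returned by _example_diagnose so the
# diagnosis context stored in the Flask session is available to the model.
def _example_stream_chat(http, message, base_url="http://localhost:7860"):
    with http.post(f"{base_url}/chat", json={"message": message}, stream=True) as resp:
        resp.raise_for_status()
        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
            print(chunk, end="", flush=True)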
@app.route('/visualize', methods=['GET'])
def visualize():
# Retrieve the image path from session
filepath = session.get('current_img_path', '')
    if not filepath:
return jsonify({"error": "No image has been diagnosed yet."}), 400
if not os.path.exists(filepath):
return jsonify({"error": "Could not find the diagnosed image file."}), 404
    # --- Retrieve data from session ---
    prediction = np.array(session.get('last_prediction', []))
    top_confidence = session.get('last_confidence', 0)
    if prediction.size == 0:
        return jsonify({"error": "Prediction data not found in session."}), 400
    if class_names is None:
        return jsonify({"error": "AI models are not loaded. Please check server logs."}), 500
    predicted_class_index = np.argmax(prediction)
    predicted_class_name = class_names.get(predicted_class_index, "Unknown").replace('___', ' ').replace('_', ' ')
# --- Advanced Image Processing ---
try:
img = Image.open(filepath).convert('RGB')
img_np = np.array(img)
original_height, original_width = img_np.shape[:2]
# Define "Healthy Green" as pixels where G > R + 15 and G > B + 15
healthy_mask = (img_np[:, :, 1] > img_np[:, :, 0] + 15) & (img_np[:, :, 1] > img_np[:, :, 2] + 15)
healthy_pixels = np.sum(healthy_mask)
total_pixels = img_np.shape[0] * img_np.shape[1]
necrotic_infected_pixels = total_pixels - healthy_pixels
healthy_ratio = healthy_pixels / total_pixels if total_pixels > 0 else 0.0
necrotic_ratio = necrotic_infected_pixels / total_pixels if total_pixels > 0 else 0.0
# Advanced disease region detection using color analysis
# Detect brown/yellow regions (common disease indicators)
# Convert RGB to BGR for OpenCV, then to HSV
img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
hsv_img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
brown_mask = (hsv_img[:, :, 0] >= 10) & (hsv_img[:, :, 0] <= 30) & (hsv_img[:, :, 1] >= 50)
yellow_mask = (hsv_img[:, :, 0] >= 20) & (hsv_img[:, :, 0] <= 30) & (hsv_img[:, :, 2] >= 100)
disease_regions = np.logical_or(brown_mask, yellow_mask)
disease_pixel_ratio = np.sum(disease_regions) / total_pixels if total_pixels > 0 else 0.0
# Calculate health score (0-100): based on healthy pixels, confidence, and disease regions
is_healthy_class = 'Healthy' in predicted_class_name
base_score = healthy_ratio * 100
confidence_bonus = top_confidence * 20 if is_healthy_class else 0
disease_penalty = disease_pixel_ratio * 30
health_score = min(100, max(0, base_score + confidence_bonus - disease_penalty))
# Calculate severity level
if health_score >= 80:
severity = "Low"
severity_color = "#22c55e"
elif health_score >= 50:
severity = "Moderate"
severity_color = "#f59e0b"
else:
severity = "High"
severity_color = "#ef4444"
# Calculate pigmentation ratios (RGB channel ratios)
red_ratio = np.mean(img_np[:, :, 0]) / 255.0
green_ratio = np.mean(img_np[:, :, 1]) / 255.0
blue_ratio = np.mean(img_np[:, :, 2]) / 255.0
        # Normalize RGB ratios (intended for a pigmentation pie chart; not currently plotted)
rgb_total = red_ratio + green_ratio + blue_ratio
if rgb_total > 0:
red_pct = red_ratio / rgb_total
green_pct = green_ratio / rgb_total
blue_pct = blue_ratio / rgb_total
else:
red_pct, green_pct, blue_pct = 0.33, 0.33, 0.34
# Calculate color variance (pigmentation variation - indicates disease spots)
color_variance = np.var(img_np.reshape(-1, 3), axis=0)
var_total = np.sum(color_variance)
if var_total > 0:
var_red = color_variance[0] / var_total
var_green = color_variance[1] / var_total
var_blue = color_variance[2] / var_total
else:
var_red, var_green, var_blue = 0.33, 0.33, 0.34
# Calculate edge density (diseased leaves often have more defined edges/patterns)
# Convert RGB to BGR for OpenCV, then to grayscale
img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)
edge_density = np.sum(edges > 0) / total_pixels if total_pixels > 0 else 0.0
# Calculate chlorophyll index approximation (healthy leaves have higher green values)
chlorophyll_index = green_ratio - 0.3 # Baseline adjustment
chlorophyll_index = max(0, min(1, chlorophyll_index))
except Exception as e:
print(f"Error during pixel analysis: {e}")
healthy_ratio, necrotic_ratio = 0.5, 0.5
disease_pixel_ratio = 0.3
health_score = 50.0
severity = "Unknown"
severity_color = "#6b7280"
red_pct, green_pct, blue_pct = 0.33, 0.33, 0.34
var_red, var_green, var_blue = 0.33, 0.33, 0.34
edge_density = 0.1
chlorophyll_index = 0.5
img_np = np.zeros((224, 224, 3), dtype=np.uint8)
# --- Premium Plotting Engine ---
try:
plt.style.use('dark_background')
sns.set_palette("husl")
fig = plt.figure(figsize=(20, 12), dpi=150)
fig.patch.set_alpha(0.0)
fig.suptitle('Plant Health Analysis Dashboard', color='white', fontsize=20, fontweight='bold', y=0.98)
        # Create a 2x2 grid layout
gs = GridSpec(2, 2, figure=fig, hspace=0.6, wspace=0.4, left=0.08, right=0.92, top=0.92, bottom=0.08)
# Chart 1: Health Metrics Analysis (Top Left)
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_facecolor('none')
# Create severity indicators with visual bars
metrics = [
('Healthy Tissue', healthy_ratio * 100, '#22c55e'),
('Disease Regions', disease_pixel_ratio * 100, '#ef4444'),
('Necrotic Areas', necrotic_ratio * 100, '#dc2626'),
('Edge Density', edge_density * 100, '#8b5cf6'),
('Chlorophyll Index', chlorophyll_index * 100, '#10b981')
]
y_pos = np.arange(len(metrics))
bars = ax1.barh(y_pos, [m[1] for m in metrics], color=[m[2] for m in metrics],
alpha=0.8, edgecolor='white', linewidth=1.5, height=0.6)
ax1.set_yticks(y_pos)
ax1.set_yticklabels([m[0] for m in metrics], color='white', fontsize=10, fontweight='bold')
ax1.set_xlabel('Percentage (%)', color='white', fontweight='bold', fontsize=11)
ax1.set_title('Health Metrics Analysis', color='white', fontweight='bold', fontsize=13, pad=10)
ax1.set_xlim(0, 100)
ax1.tick_params(axis='x', colors='white')
ax1.grid(True, alpha=0.2, color='white', axis='x')
# Add value labels
for bar, val in zip(bars, [m[1] for m in metrics]):
width = bar.get_width()
ax1.text(width + 2, bar.get_y() + bar.get_height()/2, f'{val:.1f}%',
ha='left', va='center', color='white', fontsize=9, fontweight='bold')
        # Chart 2: Color Variance Heatmap (Top Right)
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_facecolor('none')
# Create a color variance map
if img_np.shape[0] > 0 and img_np.shape[1] > 0:
# Calculate local variance in a grid
grid_size = 20
h_step = max(1, img_np.shape[0] // grid_size)
w_step = max(1, img_np.shape[1] // grid_size)
variance_map = np.zeros((grid_size, grid_size))
for i in range(grid_size):
for j in range(grid_size):
y_start, y_end = i * h_step, min((i + 1) * h_step, img_np.shape[0])
x_start, x_end = j * w_step, min((j + 1) * w_step, img_np.shape[1])
region = img_np[y_start:y_end, x_start:x_end]
if region.size > 0:
variance_map[i, j] = np.var(region)
im = ax2.imshow(variance_map, cmap='RdYlGn_r', aspect='auto', interpolation='bilinear')
ax2.set_title('Color Variance Heatmap\n(Disease Hotspots)', color='white', fontweight='bold', fontsize=12, pad=10)
ax2.set_xlabel('Horizontal Position', color='white', fontsize=10)
ax2.set_ylabel('Vertical Position', color='white', fontsize=10)
ax2.tick_params(colors='white', labelsize=8)
plt.colorbar(im, ax=ax2, label='Variance', pad=0.02)
# Chart 4: Tissue Health Composition (Bottom Left)
ax4 = fig.add_subplot(gs[1, 0])
ax4.set_facecolor('none')
        # Donut-style pie chart. Note: the three ratios can overlap (necrotic is
        # defined as 1 - healthy), so matplotlib normalizes the slice total to 100%.
        sizes = [healthy_ratio * 100, disease_pixel_ratio * 100, necrotic_ratio * 100]
labels = ['Healthy', 'Disease Regions', 'Necrotic']
colors_pie = ['#22c55e', '#f59e0b', '#ef4444']
explode = (0.05, 0.05, 0.05)
wedges, texts, autotexts = ax4.pie(sizes, labels=labels, colors=colors_pie, autopct='%1.1f%%',
startangle=90, explode=explode, shadow=True,
textprops={'color': 'white', 'weight': 'bold', 'fontsize': 10},
wedgeprops=dict(width=0.5, edgecolor='white', linewidth=2))
ax4.set_title('Tissue Health Composition', color='white', fontweight='bold', fontsize=12, pad=10)
        # Chart 5: Spider Web Chart - Plant Health Risk Classification (Bottom Right)
ax5 = fig.add_subplot(gs[1, 1], projection='polar')
ax5.set_facecolor('none')
ax5.patch.set_alpha(0.0)
# Calculate risk level based on multiple factors
# Factors for risk assessment
factors = {
'Health Score': health_score,
'Disease Coverage': 100 - (disease_pixel_ratio * 100),
'Chlorophyll': chlorophyll_index * 100,
'Tissue Integrity': healthy_ratio * 100,
'Disease Severity': 100 - (necrotic_ratio * 100),
'Recovery Potential': max(0, 100 - (disease_pixel_ratio * 100) - (necrotic_ratio * 100))
}
# Determine overall risk category
avg_health = np.mean(list(factors.values()))
if avg_health >= 80:
risk_category = "Can Improve"
risk_color = '#22c55e'
risk_level = 1 # Low risk
elif avg_health >= 60:
risk_category = "Stable / Less Dangerous"
risk_color = '#f59e0b'
risk_level = 2 # Moderate risk
elif avg_health >= 30:
risk_category = "Extremely Dangerous"
risk_color = '#ef4444'
risk_level = 3 # High risk
else:
risk_category = "Totally Dead Plant"
risk_color = '#7f1d1d'
risk_level = 4 # Critical risk
# Prepare data for radar chart
categories = list(factors.keys())
values = list(factors.values())
N = len(categories)
# Compute angle for each category
angles = [n / float(N) * 2 * np.pi for n in range(N)]
angles += angles[:1] # Complete the circle
values += values[:1]
# Plot the spider web
ax5.plot(angles, values, 'o-', linewidth=3, color=risk_color, alpha=0.8, label='Current Status')
ax5.fill(angles, values, alpha=0.25, color=risk_color)
# Add reference zones for different risk levels
# Zone 1: Can Improve (green zone - 80-100)
zone1_angles = np.linspace(0, 2*np.pi, 100)
zone1_values = [80] * 100
ax5.plot(zone1_angles, zone1_values, '--', linewidth=1, color='#22c55e', alpha=0.3, label='Can Improve (80+)')
ax5.fill_between(zone1_angles, [80]*100, [100]*100, alpha=0.1, color='#22c55e')
# Zone 2: Stable (yellow zone - 60-80)
zone2_values = [60] * 100
ax5.plot(zone1_angles, zone2_values, '--', linewidth=1, color='#f59e0b', alpha=0.3, label='Stable (60-80)')
ax5.fill_between(zone1_angles, [60]*100, [80]*100, alpha=0.1, color='#f59e0b')
# Zone 3: Dangerous (red zone - 30-60)
zone3_values = [30] * 100
ax5.plot(zone1_angles, zone3_values, '--', linewidth=1, color='#ef4444', alpha=0.3, label='Dangerous (30-60)')
ax5.fill_between(zone1_angles, [30]*100, [60]*100, alpha=0.1, color='#ef4444')
# Zone 4: Dead (dark red zone - 0-30)
ax5.fill_between(zone1_angles, [0]*100, [30]*100, alpha=0.1, color='#7f1d1d', label='Dead (0-30)')
# Set labels
ax5.set_xticks(angles[:-1])
ax5.set_xticklabels(categories, color='white', fontsize=10, fontweight='bold')
ax5.set_ylim(0, 100)
ax5.set_yticks([25, 50, 75, 100])
ax5.set_yticklabels(['25', '50', '75', '100'], color='white', fontsize=8)
ax5.grid(True, linestyle='--', linewidth=1, alpha=0.3, color='white')
ax5.set_theta_offset(np.pi / 2)
ax5.set_theta_direction(-1)
# Add title with risk category
ax5.set_title(f'Plant Health Risk Assessment\nStatus: {risk_category}',
color=risk_color, fontweight='bold', fontsize=14, pad=20)
# Add legend
ax5.legend(loc='upper right', bbox_to_anchor=(1.05, 1),
facecolor='#141c2e', edgecolor='white', labelcolor='white', fontsize=8)
# Add value labels on the spider web
for angle, value, category in zip(angles[:-1], values[:-1], categories):
ax5.text(angle, value + 5, f'{value:.0f}%',
ha='center', va='center', color='white',
fontsize=9, fontweight='bold', bbox=dict(boxstyle='round,pad=0.3',
facecolor=risk_color, alpha=0.7, edgecolor='white', linewidth=1))
# Add diagnosis info text at the bottom
diagnosis_text = f"Diagnosis: {predicted_class_name[:40]} | Confidence: {top_confidence*100:.1f}% | Model: Keras CNN"
fig.text(0.5, 0.01, diagnosis_text, ha='center', va='bottom',
color='white', fontsize=10, style='italic', alpha=0.8)
# --- Save to Buffer and Return ---
buf = BytesIO()
fig.savefig(buf, format='png', transparent=True, dpi=120, bbox_inches='tight')
buf.seek(0)
img_b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
graph_url = f"data:image/png;base64,{img_b64}"
plt.close(fig) # Close figure to free memory
return jsonify({"graph": graph_url})
except Exception as e:
print(f"Error during visualization generation: {e}")
import traceback
traceback.print_exc()
# Ensure any open figures are closed
plt.close('all')
return jsonify({"error": f"Failed to generate visualization: {str(e)}"}), 500
# --- Main Execution ---
if __name__ == '__main__':
# Disable the auto-reloader to avoid double-importing heavy ML libraries
app.run(host='0.0.0.0', port=7860, debug=False, use_reloader=False)