# smart-traffic / app.py
# (Hugging Face Space page residue preserved as comments so the module parses:
#  "ganeshkumar383's picture" / "Update app.py" / commit "6f32cee verified")
"""
Smart Public Queue Traffic Analyzer & Decision Assistant - ENHANCED VERSION
Final Year Engineering Project with INNOVATIVE Features
Uses Computer Vision + Advanced Analytics + AI Recommendations
"""
import gradio as gr
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg') # Fix for headless environments
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image, ImageDraw, ImageFont
import io
import tempfile
import os
from datetime import datetime, timedelta
import base64
class EnhancedQueueAnalyzer:
    """Advanced queue analysis engine with innovative features"""

    def __init__(self):
        # HOG descriptor with OpenCV's pretrained SVM people detector
        # (CPU-only; chosen so the app runs on free-tier Spaces without a GPU).
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        # Queue type service time rules (minutes per person); unknown types
        # fall back to 2 min in calculate_wait_time().
        self.service_times = {
            "College Office": 2,
            "Hospital": 5,
            "Railway Counter": 3,
            "Supermarket": 1
        }
        # Peak hours data (simulated based on queue type) as
        # (start_hour, end_hour) pairs in 24-hour time, end exclusive.
        self.peak_hours = {
            "College Office": [(9, 11), (14, 16)],  # 9-11 AM, 2-4 PM
            "Hospital": [(10, 12), (15, 17)],  # 10-12 AM, 3-5 PM
            "Railway Counter": [(7, 9), (17, 19)],  # 7-9 AM, 5-7 PM
            "Supermarket": [(18, 20)],  # 6-8 PM
        }
        # Social distancing threshold (pixels - approximate; not calibrated
        # to real-world distance)
        self.min_distance_threshold = 100
def detect_people(self, image):
"""Detect people using HOG+SVM with optimized parameters"""
height, width = image.shape[:2]
scale = 1.0
if width > 800:
scale = 800 / width
image = cv2.resize(image, None, fx=scale, fy=scale)
boxes, weights = self.hog.detectMultiScale(
image,
winStride=(8, 8),
padding=(4, 4),
scale=1.05,
useMeanshiftGrouping=False
)
if scale != 1.0:
boxes = [[int(x/scale), int(y/scale), int(w/scale), int(h/scale)]
for x, y, w, h in boxes]
# Calculate centroids for distance analysis
centroids = []
for (x, y, w, h) in boxes:
cx = x + w // 2
cy = y + h // 2
centroids.append((cx, cy))
return boxes, len(boxes), centroids
def check_social_distancing(self, centroids):
"""
INNOVATIVE FEATURE 1: Social Distancing Analysis
Check if people maintain safe distance
"""
if len(centroids) < 2:
return True, 100, []
violations = []
total_distances = []
for i in range(len(centroids)):
for j in range(i + 1, len(centroids)):
dist = np.sqrt((centroids[i][0] - centroids[j][0])**2 +
(centroids[i][1] - centroids[j][1])**2)
total_distances.append(dist)
if dist < self.min_distance_threshold:
violations.append((i, j, dist))
avg_distance = np.mean(total_distances) if total_distances else 0
compliance = ((len(total_distances) - len(violations)) / len(total_distances) * 100) if total_distances else 100
return len(violations) == 0, compliance, violations
def create_density_heatmap(self, image_shape, centroids):
"""
INNOVATIVE FEATURE 2: Crowd Density Heatmap
Generate visual heatmap of crowd concentration
"""
height, width = image_shape[:2]
heatmap = np.zeros((height, width), dtype=np.float32)
for (cx, cy) in centroids:
# Create Gaussian blob around each person
y, x = np.ogrid[:height, :width]
mask = np.exp(-((x - cx)**2 + (y - cy)**2) / (2 * 50**2))
heatmap += mask
# Normalize
if heatmap.max() > 0:
heatmap = (heatmap / heatmap.max() * 255).astype(np.uint8)
# Apply colormap
heatmap_colored = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
return heatmap_colored
def calculate_queue_flow_rate(self, frame_counts):
"""
INNOVATIVE FEATURE 3: Queue Flow Rate Analysis
Estimate how fast people are being served
"""
if len(frame_counts) < 2:
return 0, "Insufficient data"
# Calculate rate of change
changes = np.diff(frame_counts)
avg_change = np.mean(changes)
# Positive change = people joining, negative = people leaving
if avg_change < -0.5:
flow_rate = "Fast (People leaving quickly)"
flow_score = "🟒 Excellent"
elif avg_change < 0:
flow_rate = "Moderate (Steady movement)"
flow_score = "🟑 Good"
else:
flow_rate = "Slow (Queue growing)"
flow_score = "πŸ”΄ Poor"
return avg_change, f"{flow_score} - {flow_rate}"
def predict_best_time(self, queue_type, current_count):
"""
INNOVATIVE FEATURE 4: Peak Time Prediction
Suggest best time to visit based on patterns
"""
current_hour = datetime.now().hour
peak_periods = self.peak_hours.get(queue_type, [(9, 17)])
# Check if currently in peak time
is_peak = any(start <= current_hour < end for start, end in peak_periods)
recommendations = []
if is_peak:
# Find next off-peak time
for start, end in peak_periods:
if current_hour < start:
recommendations.append(f"Before {start}:00 AM")
elif current_hour >= end:
continue
else:
recommendations.append(f"After {end}:00 PM")
else:
recommendations.append("Now is a good time! βœ…")
# Add general recommendations
if queue_type == "College Office":
recommendations.append("Best: Early morning (8-9 AM) or late afternoon (4-5 PM)")
elif queue_type == "Hospital":
recommendations.append("Best: Early morning (8-9 AM) to avoid crowds")
elif queue_type == "Railway Counter":
recommendations.append("Best: Mid-day (11 AM - 2 PM) for shorter queues")
elif queue_type == "Supermarket":
recommendations.append("Best: Weekday mornings (9-11 AM)")
return is_peak, recommendations
def generate_smart_alerts(self, count, wait_time, compliance, queue_type):
"""
INNOVATIVE FEATURE 5: Smart Alert System
Generate actionable alerts based on conditions
"""
alerts = []
# Crowd alert
if count > 15:
alerts.append("⚠️ HIGH CROWD: Consider visiting later")
elif count > 10:
alerts.append("⚑ MODERATE CROWD: Expect some waiting")
# Wait time alert
if wait_time > 30:
alerts.append("πŸ• LONG WAIT: Wait time exceeds 30 minutes")
# Social distancing alert
if compliance < 70:
alerts.append("🚨 SOCIAL DISTANCING: Low compliance detected")
# Queue-specific alerts
if queue_type == "Hospital" and count > 8:
alerts.append("πŸ₯ HOSPITAL: Consider emergency alternatives if urgent")
elif queue_type == "Supermarket" and count > 12:
alerts.append("πŸ›’ SUPERMARKET: Try self-checkout or delivery options")
if not alerts:
alerts.append("βœ… ALL CLEAR: Good conditions for visit")
return alerts
def annotate_image_advanced(self, image, boxes, centroids, violations, count, wait_time, decision):
"""Enhanced annotation with social distancing violations"""
annotated = image.copy()
# Draw bounding boxes
for idx, (x, y, w, h) in enumerate(boxes):
# Check if this person is involved in violation
is_violation = any(idx in [v[0], v[1]] for v in violations)
color = (0, 0, 255) if is_violation else (0, 255, 0)
cv2.rectangle(annotated, (x, y), (x + w, y + h), color, 2)
# Draw violation lines
for (i, j, dist) in violations:
cv2.line(annotated, centroids[i], centroids[j], (0, 0, 255), 2)
mid_x = (centroids[i][0] + centroids[j][0]) // 2
mid_y = (centroids[i][1] + centroids[j][1]) // 2
cv2.putText(annotated, f"{dist:.0f}px", (mid_x, mid_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
# Overlay information
overlay_height = 120
overlay = annotated.copy()
cv2.rectangle(overlay, (0, 0), (annotated.shape[1], overlay_height),
(0, 0, 0), -1)
cv2.addWeighted(overlay, 0.7, annotated, 0.3, 0, annotated)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(annotated, f"People Count: {count}", (10, 30),
font, 1, (255, 255, 255), 2)
cv2.putText(annotated, f"Wait Time: {wait_time:.0f} min", (10, 70),
font, 1, (255, 255, 255), 2)
decision_color = self._get_decision_color(wait_time)
cv2.putText(annotated, f"Decision: {decision}", (10, 110),
font, 1, decision_color, 2)
return annotated
def _get_decision_color(self, wait_time):
if wait_time < 10:
return (0, 255, 0)
elif wait_time <= 20:
return (0, 255, 255)
else:
return (0, 0, 255)
def calculate_wait_time(self, queue_size, queue_type):
service_time = self.service_times.get(queue_type, 2)
return queue_size * service_time
def make_decision(self, wait_time):
if wait_time < 10:
return "🟒 Go Now", "success"
elif wait_time <= 20:
return "🟑 Moderate Wait", "warning"
else:
return "πŸ”΄ Come Later", "error"
    def process_image(self, image_path, queue_type, show_analytics):
        """Process single image with enhanced analytics

        Returns (annotated_rgb, metrics_markdown, charts_or_None,
        heatmap_or_None); on read failure the first slot is None and the
        metrics slot carries the error message.
        """
        image = cv2.imread(image_path)
        if image is None:
            return None, "Error: Could not read image", None, None
        # OpenCV loads BGR; Gradio and matplotlib expect RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Detect people
        boxes, count, centroids = self.detect_people(image)
        # Social distancing analysis
        is_compliant, compliance, violations = self.check_social_distancing(centroids)
        # Calculate metrics
        wait_time = self.calculate_wait_time(count, queue_type)
        decision_text, decision_type = self.make_decision(wait_time)
        # Best time prediction
        is_peak, time_recommendations = self.predict_best_time(queue_type, count)
        # Smart alerts
        alerts = self.generate_smart_alerts(count, wait_time, compliance, queue_type)
        # Annotate image
        annotated = self.annotate_image_advanced(image, boxes, centroids, violations,
                                                 count, wait_time, decision_text)
        # Prepare enhanced metrics (markdown; chr(10) == '\n' because f-string
        # expressions could not contain backslashes before Python 3.12)
        metrics = f"""
### Queue Analysis Results
** People Count:** {count}
**⏱ Estimated Waiting Time:** {wait_time:.0f} minutes
** Decision:** {decision_text}
---
### Social Distancing Analysis
**Compliance Score:** {compliance:.1f}%
**Status:** {"βœ… Compliant" if is_compliant else "⚠️ Violations Detected"}
**Violations:** {len(violations)} pairs too close
---
### Best Time to Visit
**Current Status:** {"πŸ”΄ Peak Hour" if is_peak else "🟒 Off-Peak"}
**Recommendations:**
{chr(10).join(f"β€’ {rec}" for rec in time_recommendations)}
---
### Smart Alerts
{chr(10).join(f"{alert}" for alert in alerts)}
"""
        # Generate analytics (heatmap skipped for empty scenes — see
        # create_density_heatmap, which needs centroids to plot)
        charts = None
        heatmap = None
        if show_analytics and count > 0:
            charts = self._create_image_analytics(count, compliance, wait_time)
            heatmap = self.create_density_heatmap(image.shape, centroids)
        return annotated, metrics, charts, heatmap
    def process_video(self, video_path, queue_type, show_analytics):
        """Process video with flow rate analysis

        Samples every 10th frame, runs detection + distancing on each sample,
        then reports clip-wide averages plus a flow-rate trend.
        Returns (last_annotated_frame, metrics_markdown, charts, heatmap).
        """
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return None, "Error: Could not read video", None, None
        frame_counts = []       # people count per sampled frame
        frame_indices = []      # absolute frame index of each sample
        compliance_scores = []  # distancing compliance % per sample
        last_annotated = None
        last_centroids = []
        frame_idx = 0
        sample_interval = 10  # analyze 1 frame in 10 to bound CPU cost
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_idx % sample_interval == 0:
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                boxes, count, centroids = self.detect_people(frame_rgb)
                # Social distancing check
                _, compliance, violations = self.check_social_distancing(centroids)
                frame_counts.append(count)
                frame_indices.append(frame_idx)
                compliance_scores.append(compliance)
                if len(frame_counts) > 0:  # always true here (just appended)
                    wait_time = self.calculate_wait_time(count, queue_type)
                    decision_text, _ = self.make_decision(wait_time)
                    last_annotated = self.annotate_image_advanced(frame_rgb, boxes, centroids,
                                                                  violations, count, wait_time, decision_text)
                    last_centroids = centroids
            frame_idx += 1
        cap.release()
        if len(frame_counts) == 0:
            return None, "Error: No frames could be processed", None, None
        # Calculate statistics
        avg_count = np.mean(frame_counts)
        max_count = np.max(frame_counts)
        min_count = np.min(frame_counts)
        avg_compliance = np.mean(compliance_scores)
        # Queue flow rate analysis
        flow_rate, flow_desc = self.calculate_queue_flow_rate(frame_counts)
        # Use average count for decision
        wait_time = self.calculate_wait_time(avg_count, queue_type)
        decision_text, decision_type = self.make_decision(wait_time)
        # Best time prediction
        is_peak, time_recommendations = self.predict_best_time(queue_type, int(avg_count))
        # Smart alerts
        alerts = self.generate_smart_alerts(int(avg_count), wait_time, avg_compliance, queue_type)
        # Enhanced metrics (markdown; chr(10) == '\n', see process_image)
        metrics = f"""
### Queue Analysis Results (Video)
** Average People Count:** {avg_count:.1f}
** Maximum Count:** {max_count}
** Minimum Count:** {min_count}
** Frames Analyzed:** {len(frame_counts)}
** Estimated Waiting Time:** {wait_time:.0f} minutes
** Decision:** {decision_text}
---
### Queue Flow Analysis
**Flow Rate:** {flow_rate:.2f} people/frame
**Status:** {flow_desc}
---
### Social Distancing (Video Avg)
**Average Compliance:** {avg_compliance:.1f}%
**Status:** {"βœ… Good" if avg_compliance > 70 else "⚠️ Needs Improvement"}
---
### Best Time to Visit
**Current Status:** {"πŸ”΄ Peak Hour" if is_peak else "🟒 Off-Peak"}
**Recommendations:**
{chr(10).join(f"β€’ {rec}" for rec in time_recommendations)}
---
### Smart Alerts
{chr(10).join(f"{alert}" for alert in alerts)}
"""
        # Generate analytics; heatmap is built from the last sampled frame's
        # detections only (not an aggregate over the clip)
        charts = None
        heatmap = None
        if show_analytics:
            charts = self._create_video_analytics_enhanced(frame_indices, frame_counts,
                                                           compliance_scores, avg_count, max_count, flow_rate)
            if last_centroids:
                heatmap = self.create_density_heatmap(last_annotated.shape, last_centroids)
        return last_annotated, metrics, charts, heatmap
    def _create_image_analytics(self, count, compliance, wait_time):
        """Create analytics dashboard for image

        Renders a 1x3 matplotlib figure (key metrics, decision gauge,
        compliance pie) off-screen and returns it as a PIL image.
        """
        fig = Figure(figsize=(12, 4))
        # Metrics bars
        ax1 = fig.add_subplot(131)
        metrics = ['People\nCount', 'Wait Time\n(min)', 'Compliance\n(%)']
        values = [count, wait_time, compliance]
        colors = ['#2196F3', '#FF9800', '#4CAF50']
        bars = ax1.barh(metrics, values, color=colors)
        ax1.set_xlabel('Value', fontsize=10)
        ax1.set_title('Key Metrics', fontsize=12, fontweight='bold')
        ax1.grid(axis='x', alpha=0.3)
        # Label each bar with its numeric value just past the bar end.
        for bar, value in zip(bars, values):
            width = bar.get_width()
            ax1.text(width, bar.get_y() + bar.get_height()/2,
                     f' {value:.1f}', va='center', fontsize=10, fontweight='bold')
        # Decision gauge: highlight the active bucket over a grey background row
        # (thresholds mirror make_decision).
        ax2 = fig.add_subplot(132)
        decision_categories = ['Go Now\n(<10min)', 'Moderate\n(10-20min)', 'Come Later\n(>20min)']
        decision_position = 0 if wait_time < 10 else (1 if wait_time <= 20 else 2)
        decision_colors = ['#4CAF50', '#FF9800', '#F44336']
        ax2.bar(decision_categories, [1, 1, 1], color=['lightgray']*3, alpha=0.3)
        ax2.bar(decision_categories[decision_position], 1, color=decision_colors[decision_position])
        ax2.set_ylabel('Decision Level', fontsize=10)
        ax2.set_title('Decision Indicator', fontsize=12, fontweight='bold')
        ax2.set_ylim(0, 1.2)
        ax2.set_yticks([])
        # Compliance pie
        ax3 = fig.add_subplot(133)
        compliance_data = [compliance, 100-compliance]
        colors_pie = ['#4CAF50', '#FFCDD2']
        ax3.pie(compliance_data, labels=['Compliant', 'Gap'], autopct='%1.1f%%',
                colors=colors_pie, startangle=90)
        ax3.set_title('Social Distancing', fontsize=12, fontweight='bold')
        fig.tight_layout()
        # Convert to image via the Agg canvas (headless-safe, no GUI backend).
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        buf = io.BytesIO()
        canvas.print_png(buf)
        buf.seek(0)
        img = Image.open(buf)
        plt.close(fig)  # harmless for a non-pyplot Figure; kept from original
        return img
    def _create_video_analytics_enhanced(self, frame_indices, frame_counts, compliance_scores,
                                         avg_count, max_count, flow_rate):
        """Create comprehensive analytics for video

        Builds a 2x3 dashboard (count timeline, stats bars, compliance
        timeline, histogram, flow gauge, text summary) and returns it as a
        PIL image.
        """
        fig = Figure(figsize=(16, 10))
        # 1. People count timeline
        ax1 = fig.add_subplot(231)
        ax1.plot(frame_indices, frame_counts, marker='o', linewidth=2,
                 markersize=4, color='#2196F3', label='Detected People')
        ax1.axhline(y=avg_count, color='#FF9800', linestyle='--',
                    linewidth=2, label=f'Average: {avg_count:.1f}')
        ax1.axhline(y=max_count, color='#F44336', linestyle=':',
                    linewidth=2, label=f'Maximum: {max_count}')
        ax1.set_xlabel('Frame Index', fontsize=10)
        ax1.set_ylabel('People Count', fontsize=10)
        ax1.set_title(' People Count Over Time', fontsize=11, fontweight='bold')
        ax1.legend(fontsize=8)
        ax1.grid(True, alpha=0.3)
        # 2. Statistics bars
        ax2 = fig.add_subplot(232)
        stats = ['Average', 'Maximum', 'Minimum']
        values = [avg_count, max_count, np.min(frame_counts)]
        colors = ['#4CAF50', '#F44336', '#2196F3']
        bars = ax2.bar(stats, values, color=colors, width=0.6)
        ax2.set_ylabel('People Count', fontsize=10)
        ax2.set_title(' Queue Statistics', fontsize=11, fontweight='bold')
        ax2.grid(axis='y', alpha=0.3)
        # Value labels on top of each bar.
        for bar, value in zip(bars, values):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height,
                     f'{value:.1f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
        # 3. Compliance timeline
        ax3 = fig.add_subplot(233)
        ax3.plot(frame_indices, compliance_scores, marker='s', linewidth=2,
                 markersize=4, color='#4CAF50', label='Compliance')
        ax3.axhline(y=70, color='red', linestyle='--', linewidth=1, label='Minimum (70%)')
        ax3.set_xlabel('Frame Index', fontsize=10)
        ax3.set_ylabel('Compliance (%)', fontsize=10)
        ax3.set_title(' Social Distancing Compliance', fontsize=11, fontweight='bold')
        ax3.legend(fontsize=8)
        ax3.grid(True, alpha=0.3)
        ax3.set_ylim(0, 105)
        # 4. Distribution histogram
        ax4 = fig.add_subplot(234)
        ax4.hist(frame_counts, bins=10, color='#9C27B0', edgecolor='black', alpha=0.7)
        ax4.axvline(avg_count, color='red', linestyle='--', linewidth=2, label='Mean')
        ax4.set_xlabel('People Count', fontsize=10)
        ax4.set_ylabel('Frequency', fontsize=10)
        ax4.set_title(' Count Distribution', fontsize=11, fontweight='bold')
        ax4.legend(fontsize=8)
        ax4.grid(axis='y', alpha=0.3)
        # 5. Flow rate indicator (three buckets; active one highlighted)
        ax5 = fig.add_subplot(235)
        flow_categories = ['Fast\nDecrease', 'Stable', 'Slow\nIncrease']
        flow_position = 0 if flow_rate < -0.5 else (1 if abs(flow_rate) <= 0.5 else 2)
        flow_colors = ['#4CAF50', '#FF9800', '#F44336']
        ax5.bar(flow_categories, [1, 1, 1], color=['lightgray']*3, alpha=0.3)
        ax5.bar(flow_categories[flow_position], 1, color=flow_colors[flow_position])
        ax5.set_ylabel('Flow Status', fontsize=10)
        ax5.set_title(' Queue Flow Rate', fontsize=11, fontweight='bold')
        ax5.set_ylim(0, 1.2)
        ax5.set_yticks([])
        ax5.text(flow_position, 0.5, f'{flow_rate:.2f}', ha='center', va='center',
                 fontsize=14, fontweight='bold', color='white')
        # 6. Summary metrics (plain-text box, axes hidden)
        ax6 = fig.add_subplot(236)
        ax6.axis('off')
        summary_text = f"""
SUMMARY INSIGHTS
Total Frames: {len(frame_counts)}
Avg People: {avg_count:.1f}
Peak Count: {max_count}
Avg Compliance: {np.mean(compliance_scores):.1f}%
Flow Rate: {flow_rate:.2f}/frame
Trend: {'Decreasing βœ“' if flow_rate < -0.3 else ('Stable' if abs(flow_rate) < 0.3 else 'Increasing ⚠')}
"""
        ax6.text(0.1, 0.5, summary_text, fontsize=10, family='monospace',
                 verticalalignment='center', bbox=dict(boxstyle='round',
                 facecolor='wheat', alpha=0.5))
        fig.suptitle(' COMPREHENSIVE VIDEO ANALYTICS DASHBOARD',
                     fontsize=14, fontweight='bold', y=0.98)
        # Leave headroom for the suptitle.
        fig.tight_layout(rect=[0, 0, 1, 0.96])
        # Convert to image via the Agg canvas (headless-safe).
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        buf = io.BytesIO()
        canvas.print_png(buf)
        buf.seek(0)
        img = Image.open(buf)
        plt.close(fig)  # harmless for a non-pyplot Figure; kept from original
        return img
def analyze_queue(file, queue_type, show_analytics):
    """Dispatch an uploaded file to image or video analysis.

    Args:
        file: upload from gr.File — a filepath string when type="filepath",
            or a tempfile-like object with a .name attribute otherwise.
        queue_type: queue category controlling service-time/peak-hour rules.
        show_analytics: whether to build the charts/heatmap outputs.

    Returns:
        (annotated_image, metrics_markdown, charts, heatmap); the image and
        chart slots are None on errors, with the message in the metrics slot.
    """
    if file is None:
        return None, "⚠️ Please upload an image or video file.", None, None
    # BUG FIX: gr.File(type="filepath") hands the handler a plain str, which
    # has no .name attribute — accept both the str and the legacy file object.
    path = file if isinstance(file, str) else file.name
    file_ext = os.path.splitext(path)[1].lower()
    try:
        # Analyzer is constructed only after the extension is validated, so
        # unsupported uploads return fast without loading the HOG detector.
        if file_ext in ['.jpg', '.jpeg', '.png', '.bmp', '.webp']:
            return EnhancedQueueAnalyzer().process_image(path, queue_type, show_analytics)
        elif file_ext in ['.mp4', '.avi', '.mov', '.mkv', '.webm']:
            return EnhancedQueueAnalyzer().process_video(path, queue_type, show_analytics)
        else:
            return None, "❌ Unsupported file format.", None, None
    except Exception as e:
        # Surface the failure in the UI instead of crashing the Gradio worker.
        return None, f"❌ Error: {str(e)}", None, None
def create_interface():
    """Create enhanced Gradio interface

    Declarative Blocks layout: left column holds the inputs, right column the
    tabbed outputs (annotated frame / heatmap) plus metrics markdown and an
    analytics accordion. Returns the (unlaunched) gr.Blocks app.
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="Smart Queue Analyzer - Enhanced") as app:
        gr.Markdown("""
# Smart Public Queue Traffic Analyzer - ENHANCED VERSION
### AI-Powered Decision Assistant with Advanced Analytics
**New Features:** Crowd Density Heatmap | Queue Flow Analysis | Social Distancing Check | Smart Alerts | Peak Time Prediction
""")
        with gr.Row():
            # Input Panel
            with gr.Column(scale=1):
                gr.Markdown("### Input Configuration")
                file_input = gr.File(
                    label="Upload Image or Video",
                    file_types=["image", "video"],
                    type="filepath"
                )
                queue_type = gr.Dropdown(
                    choices=["College Office", "Hospital", "Railway Counter", "Supermarket"],
                    value="College Office",
                    label="Queue Type",
                    info="Affects wait time calculation and peak hour prediction"
                )
                show_analytics = gr.Checkbox(
                    label="Enable Advanced Analytics",
                    value=True,
                    info="Show comprehensive charts and heatmaps"
                )
                analyze_btn = gr.Button(
                    " Analyze Queue",
                    variant="primary",
                    size="lg"
                )
                gr.Markdown("""
---
### Enhanced Features
βœ… **Social Distancing Check**
βœ… **Crowd Density Heatmap**
βœ… **Queue Flow Rate (Video)**
βœ… **Peak Time Predictions**
βœ… **Smart Alert System**
βœ… **Comprehensive Analytics**
---
**Supported Formats:**
- Images: JPG, PNG, BMP, WEBP
- Videos: MP4, AVI, MOV
""")
            # Output Panel
            with gr.Column(scale=2):
                gr.Markdown("### Analysis Dashboard")
                with gr.Tabs():
                    with gr.Tab(" Annotated Output"):
                        output_image = gr.Image(
                            label="Detection & Analysis",
                            type="numpy",
                            height=400
                        )
                    with gr.Tab(" Density Heatmap"):
                        output_heatmap = gr.Image(
                            label="Crowd Density Heatmap",
                            type="numpy",
                            height=400
                        )
                output_metrics = gr.Markdown(
                    value="*Upload a file and click Analyze to see results*"
                )
                with gr.Accordion(" Advanced Analytics", open=True):
                    output_chart = gr.Image(
                        label="Comprehensive Analytics Dashboard",
                        type="pil"
                    )
        gr.Markdown("""
---
### About This Project
**Technology Stack:** OpenCV HOG+SVM | Advanced Computer Vision | AI Analytics
**Project Type:** Final Year Engineering Project
**Innovation Level:** Conference-Ready with 5+ Innovative Features
**Deployment:** CPU-Optimized for Hugging Face Spaces (Free Tier)
**Key Innovations:**
1. Real-time crowd density heatmap visualization
2. Social distancing compliance monitoring
3. Queue flow rate prediction and trend analysis
4. Intelligent peak-time recommendations
5. Multi-metric smart alert system
6. Comprehensive analytics dashboard
**Use Cases:** Campus Management | Hospital Traffic Control | Public Transport | Retail Analytics
""")
        # Event handlers: output order must match analyze_queue's return tuple
        # (annotated image, metrics markdown, charts, heatmap).
        analyze_btn.click(
            fn=analyze_queue,
            inputs=[file_input, queue_type, show_analytics],
            outputs=[output_image, output_metrics, output_chart, output_heatmap]
        )
    return app
# Script entry point: build the UI and serve it (blocks until shutdown).
# Not executed when the module is imported (e.g. by a Spaces runner).
if __name__ == "__main__":
    app = create_interface()
    app.launch()