import gradio as gr
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Define edge AI models and their benchmark metrics
MODELS = {
    "MobileNetV2": {"latency_ms": 25, "accuracy": 0.87, "size_mb": 14, "power_mw": 120},
    "EfficientNet-B0": {"latency_ms": 32, "accuracy": 0.91, "size_mb": 20, "power_mw": 145},
    "SqueezeNet": {"latency_ms": 18, "accuracy": 0.82, "size_mb": 5, "power_mw": 95},
    "ResNet18": {"latency_ms": 45, "accuracy": 0.88, "size_mb": 44, "power_mw": 180},
    "TinyYOLO": {"latency_ms": 38, "accuracy": 0.75, "size_mb": 34, "power_mw": 165},
    "MobileViT-S": {"latency_ms": 28, "accuracy": 0.89, "size_mb": 18, "power_mw": 135},
}
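# The registry can be extended by adding entries with the same four keys. The
# line below is a hypothetical example (placeholder values, not measured
# benchmarks), left commented out:
#
# MODELS["NanoDet-Plus"] = {"latency_ms": 22, "accuracy": 0.80, "size_mb": 4, "power_mw": 90}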
def create_benchmark_chart(model1, model2, metric):
    """Create a comparison chart for the selected models and metric."""
    if not model1 or not model2:
        return None, "Please select two models to compare"
    metrics_map = {
        "Latency (ms)": "latency_ms",
        "Accuracy (%)": "accuracy",
        "Model Size (MB)": "size_mb",
        "Power (mW)": "power_mw"
    }
    metric_key = metrics_map[metric]
    m1_value = MODELS[model1][metric_key]
    m2_value = MODELS[model2][metric_key]
    # Convert accuracy to a percentage for display
    if metric_key == "accuracy":
        m1_value = m1_value * 100
        m2_value = m2_value * 100
    fig = go.Figure(data=[
        go.Bar(name=model1, x=[metric], y=[m1_value], marker_color='#4ecdc4'),
        go.Bar(name=model2, x=[metric], y=[m2_value], marker_color='#ff6b6b')
    ])
    fig.update_layout(
        title=f"Edge AI Model Comparison: {metric}",
        barmode='group',
        height=400,
        yaxis_title=metric,
        showlegend=True
    )
    # Lower values win for latency, model size, and power; higher wins for accuracy
    lower_is_better = metric_key in ("latency_ms", "size_mb", "power_mw")
    if m1_value == m2_value:
        summary = f"🀝 **{model1}** and **{model2}** tie on {metric}\n\n"
    else:
        winner = model1 if (m1_value < m2_value) == lower_is_better else model2
        summary = f"βœ… **{winner}** performs better on {metric}\n\n"
    summary += f"**{model1}**: {m1_value:.2f}\n\n"
    summary += f"**{model2}**: {m2_value:.2f}"
    return fig, summary
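# Example usage (a quick sanity check, assuming the MODELS table above):
# comparing SqueezeNet (18 ms) with ResNet18 (45 ms) on latency should name
# SqueezeNet the winner, since lower latency is better.
#
# fig, text = create_benchmark_chart("SqueezeNet", "ResNet18", "Latency (ms)")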
def create_all_metrics_comparison(model1, model2):
    """Create a comprehensive comparison across all four metrics."""
    if not model1 or not model2:
        return None, "Please select two models to compare"
    # Create a 2x2 subplot grid, one panel per metric
    fig = make_subplots(
        rows=2, cols=2,
        subplot_titles=('Latency (Lower is Better)', 'Accuracy (Higher is Better)',
                        'Model Size (Lower is Better)', 'Power Consumption (Lower is Better)')
    )
    # Latency
    fig.add_trace(go.Bar(x=[model1, model2],
                         y=[MODELS[model1]["latency_ms"], MODELS[model2]["latency_ms"]],
                         marker_color=['#4ecdc4', '#ff6b6b'],
                         showlegend=False), row=1, col=1)
    # Accuracy
    fig.add_trace(go.Bar(x=[model1, model2],
                         y=[MODELS[model1]["accuracy"] * 100, MODELS[model2]["accuracy"] * 100],
                         marker_color=['#4ecdc4', '#ff6b6b'],
                         showlegend=False), row=1, col=2)
    # Size
    fig.add_trace(go.Bar(x=[model1, model2],
                         y=[MODELS[model1]["size_mb"], MODELS[model2]["size_mb"]],
                         marker_color=['#4ecdc4', '#ff6b6b'],
                         showlegend=False), row=2, col=1)
    # Power
    fig.add_trace(go.Bar(x=[model1, model2],
                         y=[MODELS[model1]["power_mw"], MODELS[model2]["power_mw"]],
                         marker_color=['#4ecdc4', '#ff6b6b'],
                         showlegend=False), row=2, col=2)
    fig.update_layout(height=700, title_text="Complete Edge AI Model Benchmark Comparison")
    # Generate a detailed Markdown comparison
    summary = "## πŸ“Š Complete Benchmark Analysis\n\n"
    summary += f"### {model1} vs {model2}\n\n"
    summary += f"**Latency**: {MODELS[model1]['latency_ms']} ms vs {MODELS[model2]['latency_ms']} ms\n\n"
    summary += f"**Accuracy**: {MODELS[model1]['accuracy']*100:.1f}% vs {MODELS[model2]['accuracy']*100:.1f}%\n\n"
    summary += f"**Model Size**: {MODELS[model1]['size_mb']} MB vs {MODELS[model2]['size_mb']} MB\n\n"
    summary += f"**Power**: {MODELS[model1]['power_mw']} mW vs {MODELS[model2]['power_mw']} mW\n"
    return fig, summary
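# Example usage: the same function drives the "All Metrics" tab below, so it
# can be smoke-tested directly, e.g.:
#
# fig, text = create_all_metrics_comparison("MobileNetV2", "SqueezeNet")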
# Create Gradio interface
with gr.Blocks(title="Edge AI Model Benchmark - Anktechsol", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # πŸš€ Edge AI Model Benchmark Tool
    ### by **Anktechsol** - AI + IoT Experts

    Compare performance metrics of popular Edge AI models for deployment on IoT devices, edge gateways, and embedded systems.
    Perfect for selecting the right model for your AIoT applications!
    """)
    with gr.Tabs():
        with gr.Tab("πŸ“Š Single Metric Comparison"):
            with gr.Row():
                with gr.Column():
                    model1_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
                    model2_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="EfficientNet-B0")
                    metric_select = gr.Dropdown(
                        choices=["Latency (ms)", "Accuracy (%)", "Model Size (MB)", "Power (mW)"],
                        label="Select Metric",
                        value="Latency (ms)"
                    )
                    compare_btn = gr.Button("πŸ” Compare Models", variant="primary")
                with gr.Column():
                    plot_single = gr.Plot(label="Comparison Chart")
                    summary_single = gr.Markdown()
            compare_btn.click(
                fn=create_benchmark_chart,
                inputs=[model1_single, model2_single, metric_select],
                outputs=[plot_single, summary_single]
            )
        with gr.Tab("πŸ“ŠπŸ“Š All Metrics Comparison"):
            with gr.Row():
                with gr.Column(scale=1):
                    model1_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
                    model2_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="SqueezeNet")
                    compare_all_btn = gr.Button("πŸ” Compare All Metrics", variant="primary", size="lg")
                    gr.Markdown("""
                    ---
                    ### πŸ”— Resources
                    - [Anktechsol Website](https://anktechsol.com)
                    - [More AIoT Tools](https://huggingface.co/anktechsol)
                    """)
                with gr.Column(scale=3):
                    plot_all = gr.Plot(label="Complete Benchmark")
                    summary_all = gr.Markdown()
            compare_all_btn.click(
                fn=create_all_metrics_comparison,
                inputs=[model1_all, model2_all],
                outputs=[plot_all, summary_all]
            )
    # Auto-load the default comparison using the dropdowns' initial values,
    # so the chart shown on page load matches the selected models
    demo.load(
        fn=create_all_metrics_comparison,
        inputs=[model1_all, model2_all],
        outputs=[plot_all, summary_all]
    )
if __name__ == "__main__":
    demo.launch()
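# Optional: to reach the app from other devices on the local network (e.g. an
# edge gateway), Gradio's standard launch options can be used instead of the
# default call above; 7860 is Gradio's usual default port, shown here only as
# an illustration:
#
# demo.launch(server_name="0.0.0.0", server_port=7860)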