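"""Gradio app for comparing benchmark metrics of popular edge AI models."""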
import gradio as gr
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Define edge AI models and their benchmark metrics
MODELS = {
"MobileNetV2": {"latency_ms": 25, "accuracy": 0.87, "size_mb": 14, "power_mw": 120},
"EfficientNet-B0": {"latency_ms": 32, "accuracy": 0.91, "size_mb": 20, "power_mw": 145},
"SqueezeNet": {"latency_ms": 18, "accuracy": 0.82, "size_mb": 5, "power_mw": 95},
"ResNet18": {"latency_ms": 45, "accuracy": 0.88, "size_mb": 44, "power_mw": 180},
"TinyYOLO": {"latency_ms": 38, "accuracy": 0.75, "size_mb": 34, "power_mw": 165},
"MobileViT-S": {"latency_ms": 28, "accuracy": 0.89, "size_mb": 18, "power_mw": 135},
}
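# Each entry holds hardcoded reference values: latency (ms), accuracy
# (a fraction, scaled to percent for display), model size (MB), power (mW).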
def create_benchmark_chart(model1, model2, metric):
"""Create comparison chart for selected models and metric"""
if not model1 or not model2:
return None, "Please select two models to compare"
metrics_map = {
"Latency (ms)": "latency_ms",
"Accuracy (%)": "accuracy",
"Model Size (MB)": "size_mb",
"Power (mW)": "power_mw"
}
metric_key = metrics_map[metric]
m1_value = MODELS[model1][metric_key]
m2_value = MODELS[model2][metric_key]
# Convert accuracy to percentage for display
if metric_key == "accuracy":
m1_value = m1_value * 100
m2_value = m2_value * 100
fig = go.Figure(data=[
go.Bar(name=model1, x=[metric], y=[m1_value], marker_color='#4ecdc4'),
go.Bar(name=model2, x=[metric], y=[m2_value], marker_color='#ff6b6b')
])
fig.update_layout(
title=f"Edge AI Model Comparison: {metric}",
barmode='group',
height=400,
yaxis_title=metric,
showlegend=True
)
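    # Lower values win for latency, size, and power; higher wins for accuracy.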
    lower_is_better = metric_key in ("latency_ms", "size_mb", "power_mw")
    if lower_is_better:
        winner = model1 if m1_value < m2_value else model2
    else:
        winner = model1 if m1_value > m2_value else model2
    summary = f"**{winner}** performs better on {metric}\n\n"
    summary += f"**{model1}**: {m1_value:.2f}\n"
    summary += f"**{model2}**: {m2_value:.2f}"
    return fig, summary
def create_all_metrics_comparison(model1, model2):
"""Create comprehensive comparison of all metrics"""
if not model1 or not model2:
return None, "Please select two models to compare"
# Create subplot with 4 metrics
fig = make_subplots(
rows=2, cols=2,
subplot_titles=('Latency (Lower is Better)', 'Accuracy (Higher is Better)',
'Model Size (Lower is Better)', 'Power Consumption (Lower is Better)')
)
# Latency
fig.add_trace(go.Bar(x=[model1, model2],
y=[MODELS[model1]["latency_ms"], MODELS[model2]["latency_ms"]],
marker_color=['#4ecdc4', '#ff6b6b'],
showlegend=False), row=1, col=1)
# Accuracy
fig.add_trace(go.Bar(x=[model1, model2],
y=[MODELS[model1]["accuracy"]*100, MODELS[model2]["accuracy"]*100],
marker_color=['#4ecdc4', '#ff6b6b'],
showlegend=False), row=1, col=2)
# Size
fig.add_trace(go.Bar(x=[model1, model2],
y=[MODELS[model1]["size_mb"], MODELS[model2]["size_mb"]],
marker_color=['#4ecdc4', '#ff6b6b'],
showlegend=False), row=2, col=1)
# Power
fig.add_trace(go.Bar(x=[model1, model2],
y=[MODELS[model1]["power_mw"], MODELS[model2]["power_mw"]],
marker_color=['#4ecdc4', '#ff6b6b'],
showlegend=False), row=2, col=2)
fig.update_layout(height=700, title_text="Complete Edge AI Model Benchmark Comparison")
# Generate detailed comparison
summary = "## π Complete Benchmark Analysis\n\n"
summary += f"### {model1} vs {model2}\n\n"
summary += f"**Latency**: {MODELS[model1]['latency_ms']}ms vs {MODELS[model2]['latency_ms']}ms\n"
summary += f"**Accuracy**: {MODELS[model1]['accuracy']*100:.1f}% vs {MODELS[model2]['accuracy']*100:.1f}%\n"
summary += f"**Model Size**: {MODELS[model1]['size_mb']}MB vs {MODELS[model2]['size_mb']}MB\n"
summary += f"**Power**: {MODELS[model1]['power_mw']}mW vs {MODELS[model2]['power_mw']}mW\n"
return fig, summary
# Create Gradio interface
with gr.Blocks(title="Edge AI Model Benchmark - Anktechsol", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # Edge AI Model Benchmark Tool
    ### by **Anktechsol** - AI + IoT Experts
    Compare performance metrics of popular Edge AI models for deployment on IoT devices, edge gateways, and embedded systems.
    Perfect for selecting the right model for your AIoT applications!
    """)
    with gr.Tabs():
        with gr.Tab("Single Metric Comparison"):
            with gr.Row():
                with gr.Column():
                    model1_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
                    model2_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="EfficientNet-B0")
                    metric_select = gr.Dropdown(
                        choices=["Latency (ms)", "Accuracy (%)", "Model Size (MB)", "Power (mW)"],
                        label="Select Metric",
                        value="Latency (ms)"
                    )
                    compare_btn = gr.Button("Compare Models", variant="primary")
                with gr.Column():
                    plot_single = gr.Plot(label="Comparison Chart")
                    summary_single = gr.Markdown()
            compare_btn.click(
                fn=create_benchmark_chart,
                inputs=[model1_single, model2_single, metric_select],
                outputs=[plot_single, summary_single]
            )
        with gr.Tab("All Metrics Comparison"):
            with gr.Row():
                with gr.Column(scale=1):
                    model1_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
                    model2_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="SqueezeNet")
                    compare_all_btn = gr.Button("Compare All Metrics", variant="primary", size="lg")
                    gr.Markdown("""
                    ---
                    ### Resources
                    - [Anktechsol Website](https://anktechsol.com)
                    - [More AIoT Tools](https://huggingface.co/anktechsol)
                    """)
                with gr.Column(scale=3):
                    plot_all = gr.Plot(label="Complete Benchmark")
                    summary_all = gr.Markdown()
            compare_all_btn.click(
                fn=create_all_metrics_comparison,
                inputs=[model1_all, model2_all],
                outputs=[plot_all, summary_all]
            )
    # Auto-load a comparison using the dropdowns' initial values
    demo.load(
        fn=create_all_metrics_comparison,
        inputs=[model1_all, model2_all],
        outputs=[plot_all, summary_all]
    )
if __name__ == "__main__":
    demo.launch()
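# Gradio serves the app locally (http://127.0.0.1:7860 by default);
# pass share=True to launch() for a temporary public link.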