anktechsol committed · Commit afba55c · verified · 1 Parent(s): f0edb41

Create app.py

Files changed (1): app.py +173 -0
app.py ADDED
@@ -0,0 +1,173 @@
+ import gradio as gr
+ import plotly.graph_objects as go
+ from plotly.subplots import make_subplots
+
+ # Define edge AI models and their benchmark metrics
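+ # Units: latency_ms in milliseconds, accuracy as a 0-1 fraction,
+ # size_mb in megabytes, power_mw in milliwatts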
+ MODELS = {
+     "MobileNetV2": {"latency_ms": 25, "accuracy": 0.87, "size_mb": 14, "power_mw": 120},
+     "EfficientNet-B0": {"latency_ms": 32, "accuracy": 0.91, "size_mb": 20, "power_mw": 145},
+     "SqueezeNet": {"latency_ms": 18, "accuracy": 0.82, "size_mb": 5, "power_mw": 95},
+     "ResNet18": {"latency_ms": 45, "accuracy": 0.88, "size_mb": 44, "power_mw": 180},
+     "TinyYOLO": {"latency_ms": 38, "accuracy": 0.75, "size_mb": 34, "power_mw": 165},
+     "MobileViT-S": {"latency_ms": 28, "accuracy": 0.89, "size_mb": 18, "power_mw": 135},
+ }
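+ # To benchmark another model, add an entry with the same four keys, e.g.
+ # (hypothetical figures):
+ # MODELS["NanoDet"] = {"latency_ms": 22, "accuracy": 0.80, "size_mb": 7, "power_mw": 110}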
+
+ def create_benchmark_chart(model1, model2, metric):
+     """Create a comparison chart for the selected models and metric."""
+     if not model1 or not model2:
+         return None, "Please select two models to compare"
+
+     metrics_map = {
+         "Latency (ms)": "latency_ms",
+         "Accuracy (%)": "accuracy",
+         "Model Size (MB)": "size_mb",
+         "Power (mW)": "power_mw"
+     }
+
+     metric_key = metrics_map[metric]
+
+     m1_value = MODELS[model1][metric_key]
+     m2_value = MODELS[model2][metric_key]
+
+     # Convert accuracy to a percentage for display
+     if metric_key == "accuracy":
+         m1_value = m1_value * 100
+         m2_value = m2_value * 100
+
+     fig = go.Figure(data=[
+         go.Bar(name=model1, x=[metric], y=[m1_value], marker_color='#4ecdc4'),
+         go.Bar(name=model2, x=[metric], y=[m2_value], marker_color='#ff6b6b')
+     ])
+
+     fig.update_layout(
+         title=f"Edge AI Model Comparison: {metric}",
+         barmode='group',
+         height=400,
+         yaxis_title=metric,
+         showlegend=True
+     )
+
+     # Accuracy is the only higher-is-better metric; latency, model size,
+     # and power draw all favor the lower value
+     if metric_key == "accuracy":
+         winner = model1 if m1_value > m2_value else model2
+     else:
+         winner = model1 if m1_value < m2_value else model2
+
+     summary = f"✅ **{winner}** performs better on {metric}\n\n"
+     summary += f"**{model1}**: {m1_value:.2f}\n\n"
+     summary += f"**{model2}**: {m2_value:.2f}"
+
+     return fig, summary
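+
+ # Example call outside the UI (a quick smoke test):
+ # fig, text = create_benchmark_chart("MobileNetV2", "SqueezeNet", "Latency (ms)")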
+
+ def create_all_metrics_comparison(model1, model2):
+     """Create a comprehensive comparison across all metrics."""
+     if not model1 or not model2:
+         return None, "Please select two models to compare"
+
+     # Create a 2x2 subplot grid, one panel per metric
+     fig = make_subplots(
+         rows=2, cols=2,
+         subplot_titles=('Latency (Lower is Better)', 'Accuracy (Higher is Better)',
+                         'Model Size (Lower is Better)', 'Power Consumption (Lower is Better)')
+     )
+
+     # Latency
+     fig.add_trace(go.Bar(x=[model1, model2],
+                          y=[MODELS[model1]["latency_ms"], MODELS[model2]["latency_ms"]],
+                          marker_color=['#4ecdc4', '#ff6b6b'],
+                          showlegend=False), row=1, col=1)
+
+     # Accuracy
+     fig.add_trace(go.Bar(x=[model1, model2],
+                          y=[MODELS[model1]["accuracy"] * 100, MODELS[model2]["accuracy"] * 100],
+                          marker_color=['#4ecdc4', '#ff6b6b'],
+                          showlegend=False), row=1, col=2)
+
+     # Size
+     fig.add_trace(go.Bar(x=[model1, model2],
+                          y=[MODELS[model1]["size_mb"], MODELS[model2]["size_mb"]],
+                          marker_color=['#4ecdc4', '#ff6b6b'],
+                          showlegend=False), row=2, col=1)
+
+     # Power
+     fig.add_trace(go.Bar(x=[model1, model2],
+                          y=[MODELS[model1]["power_mw"], MODELS[model2]["power_mw"]],
+                          marker_color=['#4ecdc4', '#ff6b6b'],
+                          showlegend=False), row=2, col=2)
+
+     fig.update_layout(height=700, title_text="Complete Edge AI Model Benchmark Comparison")
+
+     # Generate a detailed text comparison
+     summary = "## 📊 Complete Benchmark Analysis\n\n"
+     summary += f"### {model1} vs {model2}\n\n"
+     summary += f"**Latency**: {MODELS[model1]['latency_ms']} ms vs {MODELS[model2]['latency_ms']} ms\n\n"
+     summary += f"**Accuracy**: {MODELS[model1]['accuracy']*100:.1f}% vs {MODELS[model2]['accuracy']*100:.1f}%\n\n"
+     summary += f"**Model Size**: {MODELS[model1]['size_mb']} MB vs {MODELS[model2]['size_mb']} MB\n\n"
+     summary += f"**Power**: {MODELS[model1]['power_mw']} mW vs {MODELS[model2]['power_mw']} mW\n"
+
+     return fig, summary
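+
+ # Both comparison functions return a (plotly Figure, Markdown string) pair,
+ # matching the [gr.Plot, gr.Markdown] output lists wired up below.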
+
+ # Create the Gradio interface
+ with gr.Blocks(title="Edge AI Model Benchmark - Anktechsol", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("""
+     # 🚀 Edge AI Model Benchmark Tool
+     ### by **Anktechsol** - AI + IoT Experts
+
+     Compare performance metrics of popular Edge AI models for deployment on IoT devices, edge gateways, and embedded systems.
+     Perfect for selecting the right model for your AIoT applications!
+     """)
+
+     with gr.Tabs():
+         with gr.Tab("📊 Single Metric Comparison"):
+             with gr.Row():
+                 with gr.Column():
+                     model1_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
+                     model2_single = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="EfficientNet-B0")
+                     metric_select = gr.Dropdown(
+                         choices=["Latency (ms)", "Accuracy (%)", "Model Size (MB)", "Power (mW)"],
+                         label="Select Metric",
+                         value="Latency (ms)"
+                     )
+                     compare_btn = gr.Button("🔍 Compare Models", variant="primary")
+
+                 with gr.Column():
+                     plot_single = gr.Plot(label="Comparison Chart")
+                     summary_single = gr.Markdown()
+
+             compare_btn.click(
+                 fn=create_benchmark_chart,
+                 inputs=[model1_single, model2_single, metric_select],
+                 outputs=[plot_single, summary_single]
+             )
+
+         with gr.Tab("📊📊 All Metrics Comparison"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     model1_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 1", value="MobileNetV2")
+                     model2_all = gr.Dropdown(choices=list(MODELS.keys()), label="Model 2", value="SqueezeNet")
+                     compare_all_btn = gr.Button("🔍 Compare All Metrics", variant="primary", size="lg")
+                     gr.Markdown("""
+                     ---
+                     ### 🔗 Resources
+                     - [Anktechsol Website](https://anktechsol.com)
+                     - [More AIoT Tools](https://huggingface.co/anktechsol)
+                     """)
+
+                 with gr.Column(scale=3):
+                     plot_all = gr.Plot(label="Complete Benchmark")
+                     summary_all = gr.Markdown()
+
+             compare_all_btn.click(
+                 fn=create_all_metrics_comparison,
+                 inputs=[model1_all, model2_all],
+                 outputs=[plot_all, summary_all]
+             )
+
+     # Auto-load the default comparison; reading the dropdowns directly keeps the
+     # initial chart in sync with their default values (MobileNetV2 vs SqueezeNet)
+     demo.load(
+         fn=create_all_metrics_comparison,
+         inputs=[model1_all, model2_all],
+         outputs=[plot_all, summary_all]
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
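+
+ # Optional: for container or edge-gateway deployment, demo.launch(server_name="0.0.0.0")
+ # exposes the app on all network interfaces.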