Preyanshz committed on
Commit
c84e944
·
verified ·
1 Parent(s): 13a407b

Upload 2 files

Browse files
Files changed (2) hide show
  1. .streamlit/config.toml +2 -0
  2. app.py +284 -0
.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [server]
2
+ maxUploadSize = 1024
app.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
+ import tempfile
+
+ import numpy as np
+ import pandas as pd
+ import streamlit as st
+ from PIL import Image
+ from ultralytics import YOLO
7
+
8
def save_uploaded_file(uploaded_file):
    """Persist a Streamlit upload to a temporary file and return its path.

    Parameters
    ----------
    uploaded_file : streamlit UploadedFile
        Object exposing ``.name`` (original filename) and ``.getbuffer()``.

    Returns
    -------
    str
        Filesystem path of the written temporary file.

    Notes
    -----
    Only the file's extension is used as the temp-file suffix: using the
    full original name (as before) can raise if the name contains path
    separators, and the extension alone is what downstream loaders
    (PIL, YOLO) need to detect the format. ``delete=False`` means the
    caller owns cleanup of the file.
    """
    _, ext = os.path.splitext(uploaded_file.name)
    with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp_file:
        tmp_file.write(uploaded_file.getbuffer())
    return tmp_file.name
13
+
14
def yolo_inference_tool():
    """Single-model, single-image inference subpage.

    Lets the user upload one image and one YOLO ``.pt`` checkpoint, runs
    inference, and renders a detection table plus the annotated image.
    """
    st.header("YOLO Model Inference Tool")
    st.write("Upload an image and a YOLO model (.pt) file to run inference and view detailed results.")

    image_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"], key="inference_image")
    model_file = st.file_uploader("Upload YOLO model (.pt)", type=["pt"], key="inference_model")

    if st.button("Submit (Single-Model Inference)"):
        if not image_file or not model_file:
            st.error("Please upload both an image and a model.")
            return

        # Only the model needs to live on disk (YOLO() takes a path).
        # The image is read straight from the upload buffer below, so the
        # previous unused temp copy of the image has been removed.
        model_path = save_uploaded_file(model_file)

        # Load image directly from the upload buffer.
        try:
            image = Image.open(image_file).convert("RGB")
        except Exception as e:
            st.error(f"Error reading image: {e}")
            return

        st.subheader("Image Details")
        st.write(f"**Image Size:** {image.size[0]} x {image.size[1]}")
        st.write(f"**File Type:** {image_file.type}")

        # Load model from the temp checkpoint path.
        try:
            model = YOLO(model_path)
        except Exception as e:
            st.error(f"Error loading model: {e}")
            return

        # Run inference on the RGB array.
        st.subheader("Inference Results")
        try:
            results = model(np.array(image))
        except Exception as e:
            st.error(f"Inference error: {e}")
            return

        r = results[0]
        boxes_data = []
        if r.boxes is not None and len(r.boxes) > 0:
            for i in range(len(r.boxes)):
                # Tensors may live on GPU; move to CPU before numpy conversion.
                coords = r.boxes.xyxy[i].cpu().numpy() if hasattr(r.boxes.xyxy[i], "cpu") else r.boxes.xyxy[i]
                conf = r.boxes.conf[i].cpu().numpy() if hasattr(r.boxes.conf[i], "cpu") else r.boxes.conf[i]
                cls_idx = int(r.boxes.cls[i].cpu().numpy()) if hasattr(r.boxes.cls[i], "cpu") else int(r.boxes.cls[i])
                class_name = r.names.get(cls_idx, "Unknown")
                boxes_data.append({
                    "Box": i + 1,
                    "Coordinates": f"[{coords[0]:.1f}, {coords[1]:.1f}, {coords[2]:.1f}, {coords[3]:.1f}]",
                    "Confidence": f"{conf:.2f}",
                    "Class": class_name
                })
            df_boxes = pd.DataFrame(boxes_data)
            st.subheader("Detected Objects")
            st.dataframe(df_boxes, use_container_width=True)
        else:
            st.write("No objects detected.")

        # Annotated image: r.plot() returns BGR, flip the channel axis for RGB.
        try:
            annotated_img_bgr = r.plot(conf=True, boxes=True, labels=True)
            annotated_img_rgb = Image.fromarray(annotated_img_bgr[..., ::-1])
            st.subheader("Annotated Image")
            st.image(annotated_img_rgb, caption="Inference Output", use_container_width=True)
        except Exception as e:
            st.error(f"Error generating annotated image: {e}")
87
+
88
def yolo_model_comparison_tool():
    """Multi-model, multi-image comparison subpage with weighted scoring.

    Runs every uploaded YOLO model over every uploaded image, aggregates
    per-model metrics (average inference time, total detections, average
    confidence), and ranks the models with a weighted score. Speed is
    scored as the reciprocal of inference time (``1/time``) so the
    fastest model normalizes to ~1 while slower models get a fraction in
    (0, 1), instead of forcing the slowest model to 0.
    """
    st.header("YOLO Models Comparison Tool (Multi-Image, Weighted Score)")
    st.write(
        "Upload **one or more images** and **multiple YOLO model (.pt) files**. "
        "Then click **Submit** to run inference across all images with each model. "
        "We aggregate metrics (Avg Inference Time, Total Detections, Avg Confidence) "
        "and compute a Weighted Score that balances these factors.\n\n"
        "**Speed** is handled by converting inference time to a 'speed' value: `1/time`. "
        "That way, the fastest model is near 1, and slower models get fractions in (0,1), "
        "rather than forcing the slowest model to 0."
    )

    images = st.file_uploader("Upload Images", type=["jpg", "jpeg", "png"], key="comparison_images", accept_multiple_files=True)
    model_files = st.file_uploader("Upload YOLO models (.pt)", type=["pt"], key="comparison_models", accept_multiple_files=True)

    # Example weights. You can expose them as sliders if you want user customization.
    alpha_detection = 0.4
    beta_confidence = 0.3
    gamma_speed = 0.3  # speed = reciprocal of time

    if st.button("Submit (Multi-Model Comparison)"):
        if not images or not model_files:
            st.error("Please upload at least one image and at least one model.")
            return

        # Aggregated metrics per model name.
        model_agg_data = {}
        # Per-(model, image) result objects so the grid can be rendered later.
        model_image_results = {m.name: {} for m in model_files}

        for model_file in model_files:
            model_path = save_uploaded_file(model_file)
            try:
                model = YOLO(model_path)
            except Exception as e:
                st.error(f"Error loading model {model_file.name}: {e}")
                continue

            total_inference_time = 0.0
            total_detections = 0
            sum_confidences = 0.0
            total_conf_count = 0
            # BUGFIX: average over images actually inferred, not all uploads —
            # a failed read/inference (the `continue` branches below) would
            # otherwise deflate the average inference time.
            processed_images = 0

            for img_file in images:
                try:
                    pil_img = Image.open(img_file).convert("RGB")
                    np_img = np.array(pil_img)
                except Exception as e:
                    st.error(f"Error reading image {img_file.name}: {e}")
                    continue

                # Run inference
                try:
                    result = model(np_img)
                except Exception as e:
                    st.error(f"Inference error for model {model_file.name} on {img_file.name}: {e}")
                    continue

                r = result[0]
                model_image_results[model_file.name][img_file.name] = r
                processed_images += 1

                # Accumulate inference time when the result reports it.
                if isinstance(r.speed, dict) and "inference" in r.speed:
                    total_inference_time += r.speed["inference"]

                # Count detections & confidence
                if r.boxes is not None:
                    det_count = len(r.boxes)
                    total_detections += det_count
                    if det_count > 0:
                        confs = r.boxes.conf.cpu().numpy() if hasattr(r.boxes.conf, "cpu") else r.boxes.conf
                        sum_confidences += confs.sum()
                        total_conf_count += det_count

            # After all images for this model
            avg_inference_time = total_inference_time / processed_images if processed_images > 0 else float("inf")
            avg_confidence = sum_confidences / total_conf_count if total_conf_count > 0 else 0.0

            model_agg_data[model_file.name] = {
                "Model File": model_file.name,
                "Avg Inference Time (ms)": avg_inference_time,
                "Total Detections": total_detections,
                "Average Confidence": avg_confidence
            }

        if not model_agg_data:
            st.write("No valid models processed.")
            return

        # Display aggregated metrics
        df = pd.DataFrame(model_agg_data.values())
        st.subheader("Aggregated Metrics (Across All Images)")
        st.dataframe(df, use_container_width=True)

        # Weighted Scoring with reciprocal-based speed.
        # 1) Normalize detection & confidence against the best model,
        #    guarding against division by zero when everything is 0.
        detection_max = df["Total Detections"].max()
        confidence_max = df["Average Confidence"].max()
        if detection_max == 0:
            detection_max = 1
        if confidence_max == 0:
            confidence_max = 1

        df["Detection Norm"] = df["Total Detections"] / detection_max
        df["Confidence Norm"] = df["Average Confidence"] / confidence_max

        # 2) Convert time to speed = 1 / time, then normalize.
        #    Epsilon avoids dividing by zero when a reported time is 0.
        eps = 1e-9
        df["Speed Val"] = 1.0 / (df["Avg Inference Time (ms)"] + eps)
        max_speed_val = df["Speed Val"].max() if not df["Speed Val"].isnull().all() else 1
        if max_speed_val == 0:
            max_speed_val = 1  # fallback

        df["Speed Norm"] = df["Speed Val"] / max_speed_val

        # 3) Weighted Score
        df["Weighted Score"] = (
            alpha_detection * df["Detection Norm"] +
            beta_confidence * df["Confidence Norm"] +
            gamma_speed * df["Speed Norm"]
        )

        st.subheader("Weighted Score Analysis")
        st.write(f"Weights: Detection={alpha_detection}, Confidence={beta_confidence}, Speed={gamma_speed}")
        st.dataframe(df[[
            "Model File",
            "Avg Inference Time (ms)",
            "Total Detections",
            "Average Confidence",
            "Detection Norm",
            "Confidence Norm",
            "Speed Val",
            "Speed Norm",
            "Weighted Score"
        ]], use_container_width=True)

        # Identify best overall model (highest Weighted Score)
        best_idx = df["Weighted Score"].idxmax()
        best_model = df.loc[best_idx, "Model File"]
        best_score = df.loc[best_idx, "Weighted Score"]

        st.markdown(f"""
**Best Overall Model** based on Weighted Score:
**{best_model}** (Score: {best_score:.3f}).

### Interpretation:
- **Detection Norm** → fraction of the best detection count.
- **Confidence Norm** → fraction of the highest average confidence.
- **Speed Norm** → fraction of the highest (1/time). The fastest model is near 1; others are a fraction of that speed.

If you find one factor more important, adjust the weights:
- Increase **Detection** weight if you care about finding as many objects as possible.
- Increase **Confidence** weight if you only trust high‐confidence detections.
- Increase **Speed** weight if you need real‐time inference.
""")

        # Display annotated images in a grid (row = image, column = model)
        st.subheader("Annotated Images Grid (Row = Image, Column = Model)")
        model_names_sorted = sorted(model_agg_data.keys())

        for img_file in images:
            st.markdown(f"### Image: {img_file.name}")
            columns = st.columns(len(model_names_sorted))
            for col, model_name in zip(columns, model_names_sorted):
                # Retrieve the result object (None if this model/image failed).
                r = model_image_results.get(model_name, {}).get(img_file.name, None)
                if r is None:
                    col.write(f"No results for {model_name}")
                    continue

                # Generate annotated image; r.plot() returns BGR → flip to RGB.
                try:
                    annotated_img_bgr = r.plot(conf=True, boxes=True, labels=True)
                    annotated_img_rgb = Image.fromarray(annotated_img_bgr[..., ::-1])
                    col.image(
                        annotated_img_rgb,
                        caption=f"{model_name}",
                        use_container_width=True
                    )
                except Exception as e:
                    col.error(f"Error annotating image for {model_name}: {e}")
273
+
274
def main():
    """Entry point: sidebar navigation between the two YOLO subpages."""
    st.sidebar.title("Navigation")
    selected_page = st.sidebar.radio("Go to", ("YOLO Model Inference Tool", "YOLO Models Comparison Tool"))

    # Dispatch to the chosen tool; anything other than the inference
    # page falls through to the comparison tool.
    if selected_page == "YOLO Model Inference Tool":
        yolo_inference_tool()
    else:
        yolo_model_comparison_tool()


if __name__ == "__main__":
    main()