rekotvd commited on
Commit
f10e9c0
·
verified ·
1 Parent(s): 2ccff70

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +742 -711
app.py CHANGED
@@ -1,712 +1,743 @@
1
- import streamlit as st
2
- import cv2
3
- import numpy as np
4
- import time
5
- import tempfile
6
- from PIL import Image
7
- import io
8
- import os
9
- import sys
10
- import threading
11
- from datetime import datetime
12
- import requests
13
- import google.generativeai as genai
14
- from ultralytics import YOLO
15
- import folium
16
- from streamlit_folium import folium_static
17
- import geocoder
18
- from twilio.rest import Client
19
-
20
- # Page configuration
21
- st.set_page_config(
22
- page_title="Crash Detection System",
23
- page_icon="🚨",
24
- layout="wide"
25
- )
26
-
27
- # App title and description
28
- st.markdown("<h1 style='text-align: center; color: #FF4B4B;'>Vehicle Crash Detection System</h1>", unsafe_allow_html=True)
29
- st.markdown("""
30
- <p style='text-align: center; font-size: 1.2em;'>Real-time vehicle crash detection and severity assessment</p>
31
- """, unsafe_allow_html=True)
32
-
33
- # Sidebar for API key and Twilio configuration
34
- with st.sidebar.expander("API Configuration", expanded=False):
35
- api_key = st.text_input("Google Gemini API Key", type="password")
36
- if api_key:
37
- genai.configure(api_key=api_key)
38
- st.success("API key configured!")
39
- else:
40
- st.warning("Please enter your Google Gemini API key")
41
-
42
- with st.sidebar.expander("Twilio Configuration", expanded=False):
43
- twilio_account_sid = st.text_input("Twilio Account SID", type="password")
44
- twilio_auth_token = st.text_input("Twilio Auth Token", type="password")
45
- twilio_from_number = st.text_input("Twilio From Number")
46
- recipient_number = st.text_input("Recipient Phone Number")
47
-
48
- # BACKEND IMPLEMENTATION
49
@st.cache_resource
def load_model():
    """Load the YOLOv8-nano model once and cache it across Streamlit reruns.

    ``st.cache_resource`` keeps a single model instance alive for the whole
    server process, so repeated script reruns do not reload the weights.
    Returns the ``ultralytics.YOLO`` model object.
    """
    return YOLO("yolov8n.pt")
53
-
54
- # Get the model
55
- model = load_model()
56
-
57
def detect_vehicles(image, conf_threshold=None):
    """
    Detect vehicles in an image using YOLOv8.

    Args:
        image: Either a numpy array (BGR frame) or a path to an image file.
        conf_threshold: Optional detection-confidence cutoff in [0, 1].
            Defaults to the module-level ``confidence_threshold`` slider
            value, preserving the original behavior.

    Returns:
        Tuple of (list of detected vehicle type names,
                  annotated image with bounding boxes drawn,
                  list of per-detection detail dicts).
    """
    # COCO class IDs for the vehicle categories we care about.
    vehicle_classes = {2: "Car", 3: "Motorcycle", 5: "Bus", 7: "Truck"}
    detected_vehicles = []
    vehicle_details = []

    # FIX: the original read the module global ``confidence_threshold``
    # directly (defined later in the file by the sidebar slider). The
    # parameter makes that dependency explicit while keeping the old
    # behavior when callers pass nothing.
    if conf_threshold is None:
        conf_threshold = confidence_threshold

    # Restrict inference to vehicle classes only.
    results = model(image, conf=conf_threshold, classes=[2, 3, 5, 7])

    for result in results:
        if hasattr(result, "boxes") and result.boxes is not None:
            for box in result.boxes:
                if hasattr(box, "cls") and box.cls is not None:
                    cls = int(box.cls.item())
                    if cls in vehicle_classes:
                        vehicle_type = vehicle_classes[cls]
                        confidence = float(box.conf.item())
                        x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())

                        detected_vehicles.append(vehicle_type)
                        vehicle_details.append({
                            "type": vehicle_type,
                            "confidence": confidence,
                            "bbox": [x1, y1, x2, y2],
                            "size": (x2 - x1) * (y2 - y1),  # bbox area (px^2)
                        })

    # ultralytics always returns one Results object per input image, so
    # results[0] is safe here.
    annotated_frame = results[0].plot()

    return detected_vehicles, annotated_frame, vehicle_details
98
-
99
def assess_crash_severity(image, detected_vehicles, vehicle_details):
    """
    Use Google Gemini to assess crash severity in an image.

    Args:
        image: PIL Image or numpy BGR array.
        detected_vehicles: List of detected vehicle type names.
        vehicle_details: List of dicts with per-vehicle details.

    Returns:
        Tuple of (severity string, analysis string).
    """
    if not api_key:
        return "API key not configured for severity assessment", "No analysis available"

    if len(detected_vehicles) < 2:
        return "No crash detected (insufficient vehicles)", "No analysis available"

    try:
        # Convert to PIL Image if it's a numpy array (Gemini expects RGB).
        if isinstance(image, np.ndarray):
            image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        else:
            image_pil = image

        model_gemini = genai.GenerativeModel("gemini-1.5-flash")
        # FIX: the original prompt never asked for the "SEVERITY:" /
        # "ANALYSIS:" markers, so the structured-parsing branch below was
        # effectively dead code. The prompt now requests exactly the format
        # the parser expects.
        prompt = f"""
Analyze this image that shows a potential vehicle crash.
Detected vehicles: {', '.join(detected_vehicles)}
Vehicle details: {vehicle_details}

Respond in EXACTLY this format:
SEVERITY: <Minor or Severe>
ANALYSIS: <2 lines of short, crisp analysis of the crash>
"""

        response = model_gemini.generate_content([prompt, image_pil])

        if response and hasattr(response, "text"):
            response_text = response.text.strip()

            # Parse the structured response into (severity, analysis).
            if "SEVERITY:" in response_text and "ANALYSIS:" in response_text:
                severity_part = response_text.split("ANALYSIS:")[0].replace("SEVERITY:", "").strip()
                analysis_part = response_text.split("ANALYSIS:")[1].strip()
                return severity_part, analysis_part
            else:
                # Model ignored the format — return the raw text rather
                # than dropping the information.
                return response_text, "No structured analysis available"
        else:
            return "Unable to assess severity", "No analysis available"

    except Exception as e:
        return f"Error in severity assessment: {str(e)}", "Error in analysis"
153
-
154
def get_current_location():
    """
    Best-effort lookup of the machine's approximate location via IP geocoding.

    Returns:
        Dict with "lat", "lng", "address". Falls back to (0, 0,
        "Location unavailable") when the lookup fails OR returns no fix.
    """
    try:
        g = geocoder.ip('me')
        # FIX: geocoder returns None fields (without raising) when the
        # lookup yields no fix; the original then handed None coordinates
        # to folium.Map and crashed. Normalize that case to the fallback.
        if g.lat is None or g.lng is None:
            raise ValueError("no geolocation fix")
        return {
            "lat": g.lat,
            "lng": g.lng,
            "address": g.address or "Unknown address",
        }
    except Exception:
        return {
            "lat": 0,
            "lng": 0,
            "address": "Location unavailable",
        }
172
-
173
- from twilio.rest import Client
174
-
175
def send_crash_alert_twilio(crash_data):
    """
    Send a text-message crash alert using Twilio.

    Args:
        crash_data: Dict with crash details; this function reads
            'timestamp' and 'vehicles'.

    Returns:
        Tuple of (success: bool, message: str) — the message carries the
        Twilio SID on success or an error description on failure.
    """
    # SECURITY: this messaging-service SID was committed to source control;
    # prefer the environment and rotate the credential. The literal is kept
    # only as a legacy fallback so existing deployments keep working.
    messaging_service_sid = os.environ.get(
        "TWILIO_MESSAGING_SERVICE_SID", "MGf47912734231e47b941784b93376839d"
    )
    if not twilio_account_sid or not twilio_auth_token or not messaging_service_sid or not recipient_number:
        return False, "Twilio configuration incomplete"

    try:
        # Initialize Twilio client with the sidebar-supplied credentials.
        client = Client(twilio_account_sid, twilio_auth_token)

        # Create message content.
        message_body = f"""
🚨 CRASH ALERT 🚨
Time: {crash_data['timestamp']}
Vehicles: {', '.join(crash_data['vehicles'])}

"""

        # FIX: the original re-hard-coded the SID literal here instead of
        # using the variable checked above — a divergence hazard.
        message = client.messages.create(
            body=message_body,
            from_=twilio_from_number,
            messaging_service_sid=messaging_service_sid,
            to=recipient_number
        )

        return True, f"Message sent with SID: {message.sid}"

    except Exception as e:
        return False, f"Failed to send alert: {str(e)}"
213
-
214
-
215
- # FRONTEND IMPLEMENTATION
216
- # Create sidebar for controls
217
- st.sidebar.title("Controls")
218
-
219
- # Model selection
220
- detection_model = st.sidebar.selectbox(
221
- "Select Detection Model",
222
- ["YOLOv8n", "YOLOv8s", "YOLOv8m"],
223
- index=0
224
- )
225
-
226
- # Detection settings
227
- confidence_threshold = st.sidebar.slider(
228
- "Detection Confidence",
229
- min_value=0.0,
230
- max_value=1.0,
231
- value=0.45
232
- )
233
-
234
- # Input method selection
235
- input_method = st.sidebar.radio(
236
- "Input Source",
237
- ["Webcam", "Upload Image", "Upload Video"]
238
- )
239
-
240
- # Global variables for tracking detections
241
- last_crash_time = None
242
- crash_detected = False
243
- crash_severity = "None"
244
- crash_analysis = "None"
245
- detected_vehicles = []
246
- vehicle_details = []
247
- crash_alert_duration = 10 # seconds to show alert after crash detection
248
- latest_crash_image = None
249
- alert_sent_for_crash = False
250
-
251
- # Statistics
252
- if 'total_detections' not in st.session_state:
253
- st.session_state.total_detections = 0
254
- if 'total_crashes' not in st.session_state:
255
- st.session_state.total_crashes = 0
256
- if 'severe_crashes' not in st.session_state:
257
- st.session_state.severe_crashes = 0
258
- if 'alerts_sent' not in st.session_state:
259
- st.session_state.alerts_sent = 0
260
-
261
- # Create columns for the main display area
262
- col1, col2 = st.columns([2, 1])
263
-
264
- # Create a single map placeholder that will be populated only once
265
- map_container = st.container()
266
-
267
- # Image display area in column 1
268
- with col1:
269
- frame_placeholder = st.empty()
270
- # Initial image
271
- sample_img = np.zeros((480, 640, 3), dtype=np.uint8)
272
- frame_placeholder.image(sample_img, channels="BGR", use_column_width=True)
273
-
274
- # Info area in column 2
275
- with col2:
276
- status_placeholder = st.empty()
277
- vehicles_placeholder = st.empty()
278
- severity_placeholder = st.empty()
279
- analysis_placeholder = st.empty()
280
- timestamp_placeholder = st.empty()
281
- location_placeholder = st.empty()
282
- alert_status_placeholder = st.empty()
283
-
284
- # Initialize displays
285
- status_placeholder.markdown("""
286
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
287
- <h3>Status: Monitoring</h3>
288
- </div>
289
- """, unsafe_allow_html=True)
290
-
291
- vehicles_placeholder.markdown("""
292
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
293
- <h4>Detected Vehicles:</h4>
294
- <p>None</p>
295
- </div>
296
- """, unsafe_allow_html=True)
297
-
298
- severity_placeholder.markdown("""
299
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
300
- <h4>Crash Severity:</h4>
301
- <p>None</p>
302
- </div>
303
- """, unsafe_allow_html=True)
304
-
305
- analysis_placeholder.markdown("""
306
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
307
- <h4>Crash Analysis:</h4>
308
- <p>None</p>
309
- </div>
310
- """, unsafe_allow_html=True)
311
-
312
- timestamp_placeholder.markdown("""
313
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
314
- <h4>Timestamp:</h4>
315
- <p>N/A</p>
316
- </div>
317
- """, unsafe_allow_html=True)
318
-
319
- location_placeholder.markdown("""
320
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
321
- <h4>Location:</h4>
322
- <p>N/A</p>
323
- </div>
324
- """, unsafe_allow_html=True)
325
-
326
- # Add a placeholder for the map in the map container
327
- with map_container:
328
- map_placeholder = st.empty()
329
-
330
- # Initialize the map only once
331
- initial_location = get_current_location()
332
- m = folium.Map(location=[initial_location["lat"], initial_location["lng"]], zoom_start=15)
333
- folium.Marker(
334
- [initial_location["lat"], initial_location["lng"]],
335
- popup="Current Location",
336
- tooltip="Current Location"
337
- ).add_to(m)
338
- map_placeholder.empty() # Clear initially, will be populated when needed
339
-
340
def process_frame(frame):
    """Process a single frame: detect vehicles, assess severity, update state.

    Mutates the module-level crash-tracking globals and the Streamlit
    session-state counters as a side effect; returns the annotated frame
    (or the untouched input frame if processing fails).
    """
    global crash_detected, crash_severity, crash_analysis, detected_vehicles, vehicle_details
    global last_crash_time, latest_crash_image, alert_sent_for_crash

    try:
        # Detect vehicles and publish the results to module globals so the
        # display-update helpers can read them.
        vehicles, annotated_frame, vehicle_info = detect_vehicles(frame)
        detected_vehicles = vehicles
        vehicle_details = vehicle_info

        # Count any frame with at least one vehicle as a detection.
        if len(vehicles) > 0:
            st.session_state.total_detections += 1

        # A crash requires at least two vehicles in frame.
        if len(vehicles) >= 2:
            # Ask Gemini for a severity verdict and short analysis.
            severity, analysis = assess_crash_severity(frame, vehicles, vehicle_info)

            # Treat any severity keyword in the response as a crash signal.
            if any(keyword in severity.lower() for keyword in ["minor", "moderate", "severe"]):
                crash_detected = True
                crash_severity = severity
                crash_analysis = analysis
                last_crash_time = time.time()
                latest_crash_image = annotated_frame.copy()
                # Reset so a fresh Twilio alert is sent for this crash.
                alert_sent_for_crash = False

                # Update crash statistics.
                st.session_state.total_crashes += 1
                if "severe" in severity.lower():
                    st.session_state.severe_crashes += 1

        return annotated_frame

    except Exception as e:
        # On any failure fall back to showing the raw frame.
        st.error(f"Error processing frame: {e}")
        return frame
379
-
380
def send_crash_alert_if_needed():
    """Send a single Twilio alert per detected crash.

    Reads the module-level crash globals; sets ``alert_sent_for_crash`` on
    success so repeated display updates don't re-send the same alert.
    Writes the outcome into ``alert_status_placeholder``.
    """
    global alert_sent_for_crash

    if crash_detected and not alert_sent_for_crash:
        # Get current location (best effort; may be the 0/0 fallback).
        location = get_current_location()

        # Prepare crash data payload for the SMS helper.
        crash_data = {
            "timestamp": datetime.fromtimestamp(last_crash_time).strftime('%Y-%m-%d %H:%M:%S'),
            "severity": crash_severity,
            "analysis": crash_analysis,
            "vehicles": detected_vehicles,
            "vehicle_details": vehicle_details,
            "location": location
        }

        # Send alert via Twilio.
        success, message = send_crash_alert_twilio(crash_data)

        if success:
            # Latch so this crash only triggers one SMS.
            alert_sent_for_crash = True
            st.session_state.alerts_sent += 1
            alert_status_placeholder.success(f"Alert sent: {message}")
        else:
            alert_status_placeholder.error(f"Alert failed: {message}")
407
-
408
def update_info_display():
    """Refresh every info panel (status, vehicles, severity, analysis,
    timestamp, location, map) from the module-level crash globals.

    Called once per processed frame. Also expires the crash banner after
    ``crash_alert_duration`` seconds and triggers the one-shot SMS alert.
    """
    global crash_detected, crash_severity, crash_analysis, detected_vehicles, last_crash_time

    # Expire the crash alert after crash_alert_duration seconds.
    current_time = time.time()
    if last_crash_time and (current_time - last_crash_time > crash_alert_duration):
        crash_detected = False

    # Status banner: red when a crash is active, green while monitoring.
    if crash_detected:
        status_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: #FF4B4B; color: white;">
<h3>Status: CRASH DETECTED! 🚨</h3>
</div>
""", unsafe_allow_html=True)
    else:
        status_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #4CAF50; color: white;">
<h3>Status: Monitoring</h3>
</div>
""", unsafe_allow_html=True)

    # Vehicle list panel.
    if detected_vehicles:
        vehicles_html = "<br>".join([f"• {v}" for v in detected_vehicles])
        vehicles_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Detected Vehicles ({len(detected_vehicles)}):</h4>
<p>{vehicles_html}</p>
</div>
""", unsafe_allow_html=True)
    else:
        vehicles_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Detected Vehicles:</h4>
<p>None</p>
</div>
""", unsafe_allow_html=True)

    # Severity + analysis panels (only populated while a crash is active).
    if crash_detected:
        # Red for severe, orange otherwise.
        severity_color = "#FF4B4B" if "severe" in crash_severity.lower() else "#FFA500"
        severity_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: {severity_color}; color: white;">
<h4>Crash Severity:</h4>
<p>{crash_severity}</p>
</div>
""", unsafe_allow_html=True)

        analysis_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Crash Analysis:</h4>
<p>{crash_analysis}</p>
</div>
""", unsafe_allow_html=True)
    else:
        severity_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Crash Severity:</h4>
<p>None</p>
</div>
""", unsafe_allow_html=True)

        analysis_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Crash Analysis:</h4>
<p>None</p>
</div>
""", unsafe_allow_html=True)

    # Timestamp / location / map — only after a crash has ever been seen.
    if last_crash_time:
        crash_time = datetime.fromtimestamp(last_crash_time).strftime('%Y-%m-%d %H:%M:%S')
        timestamp_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Last Crash Detected:</h4>
<p>{crash_time}</p>
</div>
""", unsafe_allow_html=True)

        # NOTE(review): this hits the IP-geolocation service on every frame
        # once a crash has occurred — consider caching; confirm intent.
        location = get_current_location()
        location_placeholder.markdown(f"""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Location:</h4>
<p>{location['address']}</p>
</div>
""", unsafe_allow_html=True)

        # Rebuild the crash-location map with a red warning marker.
        m = folium.Map(location=[location["lat"], location["lng"]], zoom_start=15)
        folium.Marker(
            [location["lat"], location["lng"]],
            popup=f"Crash Location<br>Severity: {crash_severity}<br>Time: {crash_time}",
            tooltip="Crash Location",
            icon=folium.Icon(color='red', icon='warning-sign')
        ).add_to(m)

        # NOTE(review): despite the comment, this appends a new rendered map
        # to the container on every call while last_crash_time is set —
        # confirm whether it should render into map_placeholder instead.
        with map_container:
            folium_static(m)

        # Fire the one-shot SMS alert if a crash is active and unsent.
        send_crash_alert_if_needed()
    else:
        timestamp_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Timestamp:</h4>
<p>N/A</p>
</div>
""", unsafe_allow_html=True)

        location_placeholder.markdown("""
<div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
<h4>Location:</h4>
<p>N/A</p>
</div>
""", unsafe_allow_html=True)
528
-
529
- # Handle different input methods
530
- if input_method == "Webcam":
531
- # Add webcam selection
532
- webcam_source = st.sidebar.selectbox(
533
- "Select Camera",
534
- ["0", "1", "2", "3"],
535
- index=0
536
- )
537
-
538
- start_button = st.sidebar.button("Start Detection")
539
- stop_button = st.sidebar.button("Stop Detection")
540
-
541
- if start_button:
542
- try:
543
- cap = cv2.VideoCapture(int(webcam_source))
544
- if not cap.isOpened():
545
- st.error(f"Cannot open webcam {webcam_source}")
546
- else:
547
- st.session_state.webcam_running = True
548
-
549
- while st.session_state.webcam_running and not stop_button:
550
- ret, frame = cap.read()
551
- if not ret:
552
- st.error("Failed to capture frame from webcam")
553
- break
554
-
555
- # Process frame
556
- processed_frame = process_frame(frame)
557
-
558
- # Display the frame
559
- frame_placeholder.image(processed_frame, channels="BGR", use_column_width=True)
560
-
561
- # Update info display
562
- update_info_display()
563
-
564
- # Rerun to check for stop button
565
- time.sleep(0.1)
566
-
567
- # Release resources
568
- cap.release()
569
- st.session_state.webcam_running = False
570
-
571
- except Exception as e:
572
- st.error(f"Error accessing webcam: {e}")
573
-
574
- if stop_button and 'webcam_running' in st.session_state:
575
- st.session_state.webcam_running = False
576
- st.success("Detection stopped")
577
-
578
- elif input_method == "Upload Image":
579
- uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
580
-
581
- if uploaded_file is not None:
582
- # Read image
583
- image_bytes = uploaded_file.read()
584
- image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)
585
-
586
- # Display original image
587
- frame_placeholder.image(image, channels="BGR", caption="Uploaded Image", use_column_width=True)
588
-
589
- # Process button
590
- if st.sidebar.button("Process Image"):
591
- # Process the image
592
- processed_frame = process_frame(image)
593
-
594
- # Display processed image
595
- frame_placeholder.image(processed_frame, channels="BGR", caption="Processed Image", use_column_width=True)
596
-
597
- # Update info display
598
- update_info_display()
599
-
600
- elif input_method == "Upload Video":
601
- uploaded_file = st.sidebar.file_uploader("Choose a video...", type=["mp4", "avi", "mov"])
602
-
603
- if uploaded_file is not None:
604
- # Save to temporary file
605
- tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
606
- tfile.write(uploaded_file.read())
607
- tfile_path = tfile.name
608
- tfile.close()
609
-
610
- # Process button
611
- if st.sidebar.button("Process Video"):
612
- stop_processing = st.sidebar.button("Stop Processing")
613
-
614
- try:
615
- cap = cv2.VideoCapture(tfile_path)
616
-
617
- if not cap.isOpened():
618
- st.error("Cannot open video file")
619
- else:
620
- # Get video info
621
- fps = cap.get(cv2.CAP_PROP_FPS)
622
- frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
623
-
624
- # Add progress bar
625
- progress_bar = st.sidebar.progress(0)
626
- frame_number = 0
627
-
628
- # Process video
629
- st.session_state.video_running = True
630
-
631
- while st.session_state.video_running and not stop_processing:
632
- ret, frame = cap.read()
633
- if not ret:
634
- break
635
-
636
- # Process frame
637
- processed_frame = process_frame(frame)
638
-
639
- # Display the frame
640
- frame_placeholder.image(processed_frame, channels="BGR", use_column_width=True)
641
-
642
- # Update info display
643
- update_info_display()
644
-
645
- # Update progress
646
- frame_number += 1
647
- progress_bar.progress(min(frame_number / frame_count, 1.0))
648
-
649
- # Control playback speed
650
- time.sleep(0.1) # Faster than real-time for quick processing
651
-
652
- # Release resources
653
- cap.release()
654
-
655
- # Clean up
656
- os.unlink(tfile_path)
657
- st.session_state.video_running = False
658
-
659
- except Exception as e:
660
- st.error(f"Error processing video: {e}")
661
- try:
662
- os.unlink(tfile_path)
663
- except:
664
- pass
665
-
666
- # Stop button
667
- if st.sidebar.button("Stop", key="stop_video") and 'video_running' in st.session_state:
668
- st.session_state.video_running = False
669
- st.success("Processing stopped")
670
-
671
- # Add metrics/statistics section at the bottom
672
- st.markdown("---")
673
- col1, col2, col3, col4 = st.columns(4)
674
-
675
- with col1:
676
- st.markdown(f"""
677
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0; text-align: center;">
678
- <h4>Total Detections</h4>
679
- <h2>{st.session_state.total_detections}</h2>
680
- </div>
681
- """, unsafe_allow_html=True)
682
-
683
- with col2:
684
- st.markdown(f"""
685
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0; text-align: center;">
686
- <h4>Total Crashes</h4>
687
- <h2>{st.session_state.total_crashes}</h2>
688
- </div>
689
- """, unsafe_allow_html=True)
690
-
691
- with col3:
692
- st.markdown(f"""
693
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0; text-align: center;">
694
- <h4>Severe Crashes</h4>
695
- <h2>{st.session_state.severe_crashes}</h2>
696
- </div>
697
- """, unsafe_allow_html=True)
698
-
699
- with col4:
700
- st.markdown(f"""
701
- <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0; text-align: center;">
702
- <h4>SMS Alerts Sent</h4>
703
- <h2>{st.session_state.alerts_sent}</h2>
704
- </div>
705
- """, unsafe_allow_html=True)
706
-
707
- # Footer
708
- st.markdown("---")
709
- st.markdown("""
710
- <p style='text-align: center;'>Vehicle Crash Detection and Severity Assessment System</p>
711
- <p style='text-align: center;'>© 2025</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
712
  """, unsafe_allow_html=True)
 
1
+ import streamlit as st
2
+ import cv2
3
+ import numpy as np
4
+ import time
5
+ import tempfile
6
+ from PIL import Image
7
+ import io
8
+ import os
9
+ import sys
10
+ import threading
11
+ from datetime import datetime
12
+ import requests
13
+ import google.generativeai as genai
14
+ import folium
15
+ from streamlit_folium import folium_static
16
+ import geocoder
17
+ from twilio.rest import Client
18
+ from inference_sdk import InferenceHTTPClient
19
+
20
+ # Page configuration
21
+ st.set_page_config(
22
+ page_title="Crash Detection System",
23
+ page_icon="🚨",
24
+ layout="wide"
25
+ )
26
+
27
+ # App title and description
28
+ st.markdown("<h1 style='text-align: center; color: #FF4B4B;'>Vehicle Crash Detection System</h1>", unsafe_allow_html=True)
29
+ st.markdown("""
30
+ <p style='text-align: center; font-size: 1.2em;'>Real-time vehicle crash detection and severity assessment</p>
31
+ """, unsafe_allow_html=True)
32
+
33
# Sidebar for API key and Twilio configuration
with st.sidebar.expander("API Configuration", expanded=False):
    # SECURITY FIX: real API keys were hard-coded as text_input defaults,
    # exposing them to every page visitor and to version control. Rotate
    # both credentials; defaults now come from the environment only.
    api_key = st.text_input(
        "Google Gemini API Key", type="password",
        value=os.environ.get("GOOGLE_API_KEY", "")
    )
    roboflow_api_key = st.text_input(
        "Roboflow API Key", type="password",
        value=os.environ.get("ROBOFLOW_API_KEY", "")
    )
    if api_key:
        genai.configure(api_key=api_key)
        st.success("Google API key configured!")
    if roboflow_api_key:
        st.success("Roboflow API key configured!")
42
+
43
+ with st.sidebar.expander("Twilio Configuration", expanded=False):
44
+ twilio_account_sid = st.text_input("Twilio Account SID", type="password")
45
+ twilio_auth_token = st.text_input("Twilio Auth Token", type="password")
46
+ twilio_from_number = st.text_input("Twilio From Number")
47
+ recipient_number = st.text_input("Recipient Phone Number")
48
+
49
+ # BACKEND IMPLEMENTATION
50
@st.cache_resource
def initialize_roboflow_client():
    """Create and cache the Roboflow hosted-inference HTTP client.

    NOTE(review): ``st.cache_resource`` caches the first client built for
    the process; if the user later changes the Roboflow key in the sidebar,
    the cached client keeps the old key — confirm whether the cache should
    be keyed on the API key.
    """
    return InferenceHTTPClient(
        api_url="https://detect.roboflow.com",
        api_key=roboflow_api_key
    )

# Get the client (module-level singleton used by detect_crash).
CLIENT = initialize_roboflow_client()
60
+
61
def detect_crash(image):
    """
    Detect crashes in an image using a Roboflow-hosted YOLO model.

    Args:
        image: PIL Image or numpy array (BGR, as produced by OpenCV).

    Returns:
        Dict with keys:
            crash_detected (bool): True if any prediction was returned.
            severity (str): "Minor" / "Moderate" / "Severe" / "Unknown" /
                "None" / "Error".
            annotated_image (ndarray): BGR image with boxes drawn.
            raw_result (dict): raw Roboflow response.
    """
    try:
        # Normalize input to a PIL image for upload.
        if isinstance(image, np.ndarray):
            # Convert BGR (OpenCV) to RGB (PIL).
            pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        else:
            pil_image = image

        # FIX: the original wrote to a fixed filename
        # ("temp_detection_image.jpg"), which races between concurrent
        # Streamlit sessions. Use a unique temp file and guarantee cleanup.
        # Reduced JPEG quality keeps the upload fast.
        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
            temp_img_path = tmp.name
        try:
            pil_image.save(temp_img_path, "JPEG", quality=70)
            # Send to Roboflow for inference.
            result = CLIENT.infer(temp_img_path, model_id="accident-yqljb/1")
        finally:
            if os.path.exists(temp_img_path):
                os.remove(temp_img_path)

        # Defaults when no predictions come back.
        crash_detected = False
        severity = "None"

        # Work on a BGR copy so OpenCV drawing calls behave as expected.
        if isinstance(image, np.ndarray):
            annotated_image = image.copy()
        else:
            annotated_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

        if "predictions" in result and result["predictions"]:
            for pred in result["predictions"]:
                crash_detected = True

                # Map model class ids onto human-readable severities.
                # NOTE(review): with multiple boxes the LAST prediction's
                # severity wins (as in the original) — confirm whether the
                # maximum severity should be kept instead.
                class_id = pred.get("class_id", 0)
                if class_id == 1:
                    severity = "Minor"
                elif class_id == 2:
                    severity = "Moderate"
                elif class_id == 3:
                    severity = "Severe"
                else:
                    severity = "Unknown"

                # Roboflow returns center-point + size; convert to corners.
                x, y = pred.get("x", 0), pred.get("y", 0)
                width, height = pred.get("width", 0), pred.get("height", 0)
                confidence = pred.get("confidence", 0)

                x1 = int(x - width / 2)
                y1 = int(y - height / 2)
                x2 = int(x + width / 2)
                y2 = int(y + height / 2)

                # Clamp coordinates to image bounds before drawing.
                img_height, img_width = annotated_image.shape[:2]
                x1 = max(0, min(x1, img_width - 1))
                y1 = max(0, min(y1, img_height - 1))
                x2 = max(0, min(x2, img_width - 1))
                y2 = max(0, min(y2, img_height - 1))

                # Color-code the box by severity (BGR order).
                if severity == "Minor":
                    color = (0, 255, 255)  # Yellow
                elif severity == "Moderate":
                    color = (0, 165, 255)  # Orange
                else:
                    color = (0, 0, 255)    # Red

                # Draw rectangle and label.
                cv2.rectangle(annotated_image, (x1, y1), (x2, y2), color, 2)
                label = f"{severity} crash: {confidence:.2f}"
                cv2.putText(annotated_image, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        return {
            "crash_detected": crash_detected,
            "severity": severity,
            "annotated_image": annotated_image,
            "raw_result": result
        }

    except Exception as e:
        st.error(f"Error in crash detection: {str(e)}")
        # FIX: the original referenced ``pil_image`` in this handler, which
        # raises NameError if the failure happened before it was assigned.
        # Fall back to the raw input converted defensively.
        if isinstance(image, np.ndarray):
            fallback = image
        else:
            fallback = np.array(image)
        return {"crash_detected": False, "severity": "Error",
                "annotated_image": fallback, "raw_result": {}}
165
+
166
def assess_crash_severity(image, crash_info):
    """
    Use Google Gemini to produce a short textual analysis of a crash
    already detected by the Roboflow model.

    Args:
        image: PIL Image or numpy BGR array.
        crash_info: Dict from detect_crash with keys "crash_detected",
            "severity", and "raw_result".

    Returns:
        Detailed analysis as a string; status/error strings when the API
        key is missing, no crash was detected, or the call fails.
    """
    if not api_key:
        return "API key not configured for detailed analysis"

    if not crash_info["crash_detected"]:
        return "No crash detected in this image"

    try:
        # Convert to PIL Image if it's a numpy array (Gemini expects RGB).
        if isinstance(image, np.ndarray):
            image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        else:
            image_pil = image

        # Ask Gemini for a concise two-line assessment, seeded with the
        # detector's severity verdict and raw output.
        model_gemini = genai.GenerativeModel("gemini-1.5-flash")
        prompt = f"""
Analyze this vehicle crash image.
Detected crash severity: {crash_info['severity']}
Raw detection data: {crash_info['raw_result']}

Give a short, 2-line analysis of the crash. Focus on:
1. Apparent damage level and potential injuries
2. Possible cause or contributing factors

Keep your response concise and direct.
"""

        response = model_gemini.generate_content([prompt, image_pil])

        if response and hasattr(response, "text"):
            return response.text.strip()
        else:
            return "Unable to generate detailed analysis"

    except Exception as e:
        return f"Error in analysis: {str(e)}"
213
+
214
def get_current_location():
    """
    Best-effort lookup of the machine's approximate location via IP geocoding.

    Returns:
        Dict with "lat", "lng", "address". Falls back to (0, 0,
        "Location unavailable") when the lookup fails OR returns no fix.
    """
    try:
        g = geocoder.ip('me')
        # FIX: geocoder returns None fields (without raising) when the
        # lookup yields no fix; the original then handed None coordinates
        # to folium.Map and crashed. Normalize that case to the fallback.
        if g.lat is None or g.lng is None:
            raise ValueError("no geolocation fix")
        return {
            "lat": g.lat,
            "lng": g.lng,
            "address": g.address or "Unknown address",
        }
    except Exception:
        return {
            "lat": 0,
            "lng": 0,
            "address": "Location unavailable",
        }
232
+
233
def send_crash_alert_twilio(crash_data):
    """
    Send a text-message crash alert using Twilio.

    Args:
        crash_data: Dict with crash details; this function reads
            'timestamp', 'severity', 'analysis', and 'location' (a dict
            with an 'address' key).

    Returns:
        Tuple of (success: bool, message: str) — the message carries the
        Twilio SID on success or an error description on failure.
    """
    # SECURITY: this messaging-service SID was committed to source control;
    # prefer the environment and rotate the credential. The literal is kept
    # only as a legacy fallback so existing deployments keep working.
    messaging_service_sid = os.environ.get(
        "TWILIO_MESSAGING_SERVICE_SID", "MGf47912734231e47b941784b93376839d"
    )
    if not twilio_account_sid or not twilio_auth_token or not messaging_service_sid or not recipient_number:
        return False, "Twilio configuration incomplete"

    try:
        # Initialize Twilio client with the sidebar-supplied credentials.
        client = Client(twilio_account_sid, twilio_auth_token)

        # Create message content.
        message_body = f"""
🚨 CRASH ALERT 🚨
Time: {crash_data['timestamp']}
Severity: {crash_data['severity']}
Location: {crash_data['location']['address']}

Analysis: {crash_data['analysis']}
"""

        # NOTE(review): both from_ and messaging_service_sid are supplied;
        # with a messaging service Twilio normally picks the sender itself,
        # so from_ may be redundant — confirm the intended sender setup.
        message = client.messages.create(
            body=message_body,
            from_=twilio_from_number,
            messaging_service_sid=messaging_service_sid,
            to=recipient_number
        )

        return True, f"Message sent with SID: {message.sid}"

    except Exception as e:
        return False, f"Failed to send alert: {str(e)}"
273
+
274
# FRONTEND IMPLEMENTATION
# Sidebar controls: detection settings and input-source selection.
st.sidebar.title("Controls")

# Detection settings
# NOTE(review): confidence_threshold is not referenced anywhere in this
# section — presumably consumed by detect_crash(); confirm it is wired up.
confidence_threshold = st.sidebar.slider(
    "Detection Confidence",
    min_value=0.0,
    max_value=1.0,
    value=0.45
)

# Input method selection
input_method = st.sidebar.radio(
    "Input Source",
    ["Webcam", "Upload Image", "Upload Video"]
)

# Global variables for tracking detections.
# These are module-level and therefore reset on every Streamlit rerun,
# unlike the persistent counters kept in st.session_state below.
last_crash_time = None
crash_detected = False
crash_severity = "None"
crash_analysis = "None"
crash_alert_duration = 10  # seconds to show alert after crash detection
latest_crash_image = None
alert_sent_for_crash = False

# Statistics (persisted across reruns via session state)
if 'total_detections' not in st.session_state:
    st.session_state.total_detections = 0
if 'total_crashes' not in st.session_state:
    st.session_state.total_crashes = 0
if 'severe_crashes' not in st.session_state:
    st.session_state.severe_crashes = 0
if 'alerts_sent' not in st.session_state:
    st.session_state.alerts_sent = 0

# Create columns for the main display area (video feed : info panel = 2 : 1)
col1, col2 = st.columns([2, 1])

# Create a single map placeholder that will be populated only once
map_container = st.container()

# Image display area in column 1
with col1:
    frame_placeholder = st.empty()
    # Initial image: black placeholder frame until a source is processed
    sample_img = np.zeros((480, 640, 3), dtype=np.uint8)
    frame_placeholder.image(sample_img, channels="BGR", use_column_width=True)

# Info area in column 2: one st.empty() per field so each can be updated
# independently during processing.
with col2:
    status_placeholder = st.empty()
    severity_placeholder = st.empty()
    analysis_placeholder = st.empty()
    timestamp_placeholder = st.empty()
    location_placeholder = st.empty()
    alert_status_placeholder = st.empty()

    # Initialize displays with neutral "no crash yet" content
    status_placeholder.markdown("""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
        <h3>Status: Monitoring</h3>
    </div>
    """, unsafe_allow_html=True)

    severity_placeholder.markdown("""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
        <h4>Crash Severity:</h4>
        <p>None</p>
    </div>
    """, unsafe_allow_html=True)

    analysis_placeholder.markdown("""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
        <h4>Crash Analysis:</h4>
        <p>None</p>
    </div>
    """, unsafe_allow_html=True)

    timestamp_placeholder.markdown("""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
        <h4>Timestamp:</h4>
        <p>N/A</p>
    </div>
    """, unsafe_allow_html=True)

    location_placeholder.markdown("""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
        <h4>Location:</h4>
        <p>N/A</p>
    </div>
    """, unsafe_allow_html=True)

# Add a placeholder for the map in the map container
with map_container:
    map_placeholder = st.empty()

# Initialize the map only once
# NOTE(review): this initial folium map `m` is built but never rendered —
# the placeholder is cleared immediately below; the map is actually drawn
# later by update_info_display() once a crash has been detected.
initial_location = get_current_location()
m = folium.Map(location=[initial_location["lat"], initial_location["lng"]], zoom_start=15)
folium.Marker(
    [initial_location["lat"], initial_location["lng"]],
    popup="Current Location",
    tooltip="Current Location"
).add_to(m)
map_placeholder.empty()  # Clear initially, will be populated when needed
381
+
382
def handle_crash_detection(image, is_video=False, is_webcam=False):
    """
    Unified handler: run crash detection on one frame and perform all
    follow-up actions (UI refresh, Gemini analysis, statistics, SMS alert).

    Args:
        image: BGR image / frame to process
        is_video: True when the frame comes from a video file or webcam
        is_webcam: True when the frame comes from a live webcam

    Returns:
        dict with "crash_detected" (bool) and "should_stop" (bool) — the
        latter is True only for uploaded videos, which halt on first crash.
    """
    global crash_detected, crash_severity, crash_analysis, last_crash_time, latest_crash_image, alert_sent_for_crash

    # Every processed frame counts as one detection pass
    st.session_state.total_detections += 1

    detection = detect_crash(image)

    # Show the annotated frame regardless of outcome
    annotated = detection["annotated_image"]
    frame_placeholder.image(annotated, channels="BGR", use_column_width=True)

    # Guard clause: nothing detected — refresh the monitoring banner and exit
    if not detection["crash_detected"]:
        status_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #4CAF50; color: white;">
            <h3>Status: Monitoring</h3>
        </div>
        """, unsafe_allow_html=True)

        return {
            "crash_detected": False,
            "should_stop": False
        }

    # --- Crash path ---
    # Record crash state in the module-level globals
    crash_detected = True
    crash_severity = detection["severity"]
    last_crash_time = time.time()
    latest_crash_image = annotated.copy()
    alert_sent_for_crash = False

    # Ask Gemini for a short human-readable analysis of the crash
    crash_analysis = assess_crash_severity(image, detection)

    # Update crash statistics
    st.session_state.total_crashes += 1
    if crash_severity.lower() == "severe":
        st.session_state.severe_crashes += 1

    # Re-render the info panel (status, severity, analysis, map, ...)
    update_info_display()

    # Build the alert payload with the current location
    geo = get_current_location()
    alert_payload = {
        "timestamp": datetime.fromtimestamp(last_crash_time).strftime('%Y-%m-%d %H:%M:%S'),
        "severity": crash_severity,
        "analysis": crash_analysis,
        "location": geo,
        "raw_detection": detection["raw_result"]
    }

    # Fire the SMS alert and surface the outcome in the UI
    ok, status_msg = send_crash_alert_twilio(alert_payload)
    if ok:
        alert_sent_for_crash = True
        st.session_state.alerts_sent += 1
        alert_status_placeholder.success(f"Alert sent: {status_msg}")
    else:
        alert_status_placeholder.error(f"Alert failed: {status_msg}")

    # Uploaded videos stop on first crash; webcams keep streaming
    return {
        "crash_detected": True,
        "should_stop": is_video and not is_webcam
    }
465
+
466
def update_info_display():
    """
    Re-render the status, severity, analysis, timestamp, location and map
    placeholders from the module-level crash state.

    Side effects only; reads/writes the globals listed below. Also expires
    the crash banner once crash_alert_duration seconds have elapsed since
    the last detection.
    """
    global crash_detected, crash_severity, crash_analysis, last_crash_time

    # Expire the crash alert after crash_alert_duration seconds
    current_time = time.time()
    if last_crash_time and (current_time - last_crash_time > crash_alert_duration):
        crash_detected = False

    # Update status banner (red while a crash alert is active, green otherwise)
    if crash_detected:
        status_placeholder.markdown(f"""
        <div style="padding: 10px; border-radius: 5px; background-color: #FF4B4B; color: white;">
            <h3>Status: CRASH DETECTED! 🚨</h3>
        </div>
        """, unsafe_allow_html=True)
    else:
        status_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #4CAF50; color: white;">
            <h3>Status: Monitoring</h3>
        </div>
        """, unsafe_allow_html=True)

    # Update severity and analysis panels
    if crash_detected:
        # Red for "severe", orange for any other severity label
        severity_color = "#FF4B4B" if crash_severity.lower() == "severe" else "#FFA500"
        severity_placeholder.markdown(f"""
        <div style="padding: 10px; border-radius: 5px; background-color: {severity_color}; color: white;">
            <h4>Crash Severity:</h4>
            <p>{crash_severity}</p>
        </div>
        """, unsafe_allow_html=True)

        # Update analysis
        analysis_placeholder.markdown(f"""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Crash Analysis:</h4>
            <p>{crash_analysis}</p>
        </div>
        """, unsafe_allow_html=True)
    else:
        severity_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Crash Severity:</h4>
            <p>None</p>
        </div>
        """, unsafe_allow_html=True)

        analysis_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Crash Analysis:</h4>
            <p>None</p>
        </div>
        """, unsafe_allow_html=True)

    # Update timestamp, location and map.
    # NOTE(review): this branch is gated on last_crash_time rather than
    # crash_detected, so the map keeps rendering on every call after any
    # crash has ever been detected — confirm that is intended.
    if last_crash_time:
        crash_time = datetime.fromtimestamp(last_crash_time).strftime('%Y-%m-%d %H:%M:%S')
        timestamp_placeholder.markdown(f"""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Last Crash Detected:</h4>
            <p>{crash_time}</p>
        </div>
        """, unsafe_allow_html=True)

        # Update location
        location = get_current_location()
        location_placeholder.markdown(f"""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Location:</h4>
            <p>{location['address']}</p>
        </div>
        """, unsafe_allow_html=True)

        # Update map with crash location
        m = folium.Map(location=[location["lat"], location["lng"]], zoom_start=15)
        folium.Marker(
            [location["lat"], location["lng"]],
            popup=f"Crash Location<br>Severity: {crash_severity}<br>Time: {crash_time}",
            tooltip="Crash Location",
            icon=folium.Icon(color='red', icon='warning-sign')
        ).add_to(m)

        # Only update the map once in the map container
        # NOTE(review): folium_static() appends a new map element on each
        # call — despite the comment above, this does not replace the
        # previous map. Consider rendering into map_placeholder instead.
        with map_container:
            folium_static(m)
    else:
        timestamp_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Timestamp:</h4>
            <p>N/A</p>
        </div>
        """, unsafe_allow_html=True)

        location_placeholder.markdown("""
        <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0;">
            <h4>Location:</h4>
            <p>N/A</p>
        </div>
        """, unsafe_allow_html=True)
566
+
567
# Handle different input methods.
# Each branch feeds frames/images into handle_crash_detection().
if input_method == "Webcam":
    # Add webcam selection (OpenCV device index)
    webcam_source = st.sidebar.selectbox(
        "Select Camera",
        ["0", "1", "2", "3"],
        index=0
    )

    start_button = st.sidebar.button("Start Detection")
    stop_button = st.sidebar.button("Stop Detection")

    if start_button:
        try:
            cap = cv2.VideoCapture(int(webcam_source))
            if not cap.isOpened():
                st.error(f"Cannot open webcam {webcam_source}")
            else:
                st.session_state.webcam_running = True

                # NOTE(review): stop_button is evaluated once before the
                # loop and never changes inside it — pressing "Stop
                # Detection" takes effect only via the Streamlit rerun it
                # triggers, not via this loop condition. Confirm intended.
                while st.session_state.webcam_running and not stop_button:
                    ret, frame = cap.read()
                    if not ret:
                        st.error("Failed to capture frame from webcam")
                        break

                    # Process frame with unified handler
                    result = handle_crash_detection(frame, is_video=True, is_webcam=True)

                    # Throttle the loop slightly between frames
                    time.sleep(0.1)

                # Release resources
                cap.release()
                st.session_state.webcam_running = False

        except Exception as e:
            st.error(f"Error accessing webcam: {e}")

    if stop_button and 'webcam_running' in st.session_state:
        st.session_state.webcam_running = False
        st.success("Detection stopped")

elif input_method == "Upload Image":
    uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Read image: decode the uploaded bytes into a BGR OpenCV image
        image_bytes = uploaded_file.read()
        image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR)

        # Display original image
        frame_placeholder.image(image, channels="BGR", caption="Uploaded Image", use_column_width=True)

        # Process button
        if st.sidebar.button("Process Image"):
            # Process the image with unified handler (still image: defaults
            # is_video=False, so processing never auto-stops)
            handle_crash_detection(image)

elif input_method == "Upload Video":
    uploaded_file = st.sidebar.file_uploader("Choose a video...", type=["mp4", "avi", "mov"])

    if uploaded_file is not None:
        # Save to temporary file so OpenCV can open it by path
        tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
        tfile.write(uploaded_file.read())
        tfile_path = tfile.name
        tfile.close()

        # Process button
        if st.sidebar.button("Process Video"):
            # NOTE(review): this button is created only after "Process
            # Video" is clicked; pressing it triggers a rerun in which
            # "Process Video" is False, so this flag is never observed True
            # inside the loop below. Confirm the intended stop mechanism.
            stop_processing = st.sidebar.button("Stop Processing")

            try:
                cap = cv2.VideoCapture(tfile_path)

                if not cap.isOpened():
                    st.error("Cannot open video file")
                else:
                    # Get video info for the progress bar
                    fps = cap.get(cv2.CAP_PROP_FPS)
                    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                    # Add progress bar
                    progress_bar = st.sidebar.progress(0)
                    frame_number = 0

                    # Process video frame by frame
                    st.session_state.video_running = True
                    should_stop = False

                    while st.session_state.video_running and not stop_processing and not should_stop:
                        ret, frame = cap.read()
                        if not ret:
                            break

                        # Process frame with unified handler
                        result = handle_crash_detection(frame, is_video=True, is_webcam=False)
                        should_stop = result["should_stop"]

                        # If crash detected and it's a video, stop processing
                        if should_stop:
                            st.success("Crash detected! Video processing stopped.")
                            break

                        # Update progress (clamped to 1.0 in case metadata
                        # under-reports the frame count)
                        frame_number += 1
                        progress_bar.progress(min(frame_number / frame_count, 1.0))

                        # Process frames faster than real-time
                        time.sleep(0.05)

                    # Release resources
                    cap.release()

                # Clean up the temporary video file
                os.unlink(tfile_path)
                st.session_state.video_running = False

                # If video finished without crash detection
                if not should_stop and frame_number >= frame_count:
                    st.info("Video processed. No crashes detected.")

            except Exception as e:
                st.error(f"Error processing video: {e}")
                # Best-effort cleanup of the temp file
                try:
                    os.unlink(tfile_path)
                except:
                    pass

        # Stop button
        if st.sidebar.button("Stop", key="stop_video") and 'video_running' in st.session_state:
            st.session_state.video_running = False
            st.success("Processing stopped")
701
+
702
# Metrics/statistics summary row at the bottom of the page.
st.markdown("---")
stat_cols = st.columns(4)

# (label, value) pairs rendered one per column with a shared card template.
summary_stats = (
    ("Total Detections", st.session_state.total_detections),
    ("Total Crashes", st.session_state.total_crashes),
    ("Severe Crashes", st.session_state.severe_crashes),
    ("SMS Alerts Sent", st.session_state.alerts_sent),
)

for stat_col, (stat_label, stat_value) in zip(stat_cols, summary_stats):
    with stat_col:
        st.markdown(f"""
    <div style="padding: 10px; border-radius: 5px; background-color: #f0f0f0; text-align: center;">
        <h4>{stat_label}</h4>
        <h2>{stat_value}</h2>
    </div>
    """, unsafe_allow_html=True)

# Footer
st.markdown("---")
st.markdown("""
<p style='text-align: center;'>Vehicle Crash Detection and Severity Assessment System</p>
<p style='text-align: center;'>© 2025</p>
""", unsafe_allow_html=True)