import streamlit as st
from PIL import Image
import time
from ultralytics import YOLO
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
import cv2
import traceback

# 🎨 PREMIUM PAGE CONFIGURATION
st.set_page_config(
    page_title="SmartLane AI · Traffic Intelligence Platform",
    page_icon="🚦",
    layout="wide",
    initial_sidebar_state="collapsed"
)

# 💎 ULTRA-PREMIUM DESIGN SYSTEM
# NOTE(review): the CSS payload of this call was empty in the recovered
# source — restore the original stylesheet here if it is available.
st.markdown("""
""", unsafe_allow_html=True)

# SIDEBAR - EMERGENCY TEST CONTROLS
st.sidebar.title("🚨 Emergency Controls")
st.sidebar.markdown("---")

# Manual override lets testers force an emergency without a real ambulance
# image; it takes priority over every automatic detection method.
force_emergency = st.sidebar.checkbox(
    "🔴 Force Emergency Mode (Testing)", False)
if force_emergency:
    emergency_direction = st.sidebar.selectbox(
        "Select Emergency Direction",
        ["North", "East", "South", "West"]
    )
    emergency_conf_override = st.sidebar.slider(
        "Emergency Confidence %",
        50, 100, 95
    )
else:
    emergency_direction = None
    emergency_conf_override = 95

# Confidence threshold (percent) applied to the CNN classifier only.
detection_threshold = st.sidebar.slider(
    "CNN Detection Threshold %",
    30, 95, 50,
    help="Lower threshold = more sensitive CNN detection"
)

# Detection method priorities
st.sidebar.markdown("### 🔍 Detection Methods (Priority Order)")
st.sidebar.markdown("""
1. **Manual Override** - Testing mode
2. **YOLO + Color** - Detects truck/bus with ambulance colors
3. **CNN Model** - Deep learning classification
4. **Color Analysis** - Red/white pattern detection
5. **Text Pattern** - Emergency text detection
""")

st.sidebar.markdown("---")
st.sidebar.info(
    "💡 **Tip:** If YOLO detects a 'truck', the system will analyze if it's actually an ambulance based on color patterns!")
st.sidebar.warning(
    "⚠️ Make sure your ambulance image clearly shows red/white colors or 'AMBULANCE' text")

# MODEL INITIALIZATION


@st.cache_resource
def load_models():
    """Load the YOLO detector and (optionally) the ambulance CNN.

    Returns:
        tuple: ``(yolo_model | None, ambulance_model | None)``.  The CNN is
        optional — when it cannot be loaded the app keeps running and
        emergency detection falls back to the other methods.
    """
    try:
        yolo_model = YOLO("yolov8s.pt")
        st.sidebar.success("✅ YOLO Model Loaded")

        # The CNN is best-effort; its absence must not take the app down.
        try:
            ambulance_model = tf.keras.models.load_model(
                "ambulance_cnn_final.keras")
            st.sidebar.success("✅ Ambulance CNN Model Loaded")

            # Surface model shape info for debugging mismatched inputs.
            with st.sidebar.expander("🔍 Model Diagnostics"):
                st.write(f"**Input Shape:** {ambulance_model.input_shape}")
                st.write(f"**Output Shape:** {ambulance_model.output_shape}")
                st.write(f"**Classes:** Ambulance (0), Non-Ambulance (1)")

            return yolo_model, ambulance_model
        except Exception as e:
            st.sidebar.warning(f"⚠️ Ambulance model not found: {e}")
            st.sidebar.info(
                "Emergency detection will use manual override only")
            return yolo_model, None

    except Exception as e:
        st.sidebar.error(f"❌ YOLO Model Error: {e}")
        return None, None


yolo_model, ambulance_model = load_models()

if yolo_model is None:
    st.error(
        "❌ Critical Error: YOLO model failed to load. Please install: `pip install ultralytics`")
    st.stop()

# COCO class ids of the vehicle classes this app counts.
vehicle_ids = [2, 3, 5, 7]
class_names = {2: 'car', 3: 'motorcycle', 5: 'bus', 7: 'truck'}

# ADVANCED MULTI-METHOD AMBULANCE DETECTION


def detect_emergency_by_text(image):
    """Heuristic 'text pattern' check: white lettering over red/blue areas.

    Args:
        image: PIL image (RGB assumed).

    Returns:
        tuple: (is_emergency: bool, confidence: float in [0, 95]).
        Returns (False, 0.0) on any processing error (best-effort by design).
    """
    try:
        img_array = np.array(image)
        img_hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV)

        # White areas — candidate "AMBULANCE" lettering.
        lower_white = np.array([0, 0, 200])
        upper_white = np.array([180, 30, 255])
        white_mask = cv2.inRange(img_hsv, lower_white, upper_white)
        white_percentage = (np.sum(white_mask > 0) / white_mask.size) * 100

        # Red/blue areas — emergency lights and body stripes.
        lower_red = np.array([0, 100, 100])
        upper_red = np.array([10, 255, 255])
        red_mask = cv2.inRange(img_hsv, lower_red, upper_red)

        lower_blue = np.array([100, 100, 100])
        upper_blue = np.array([130, 255, 255])
        blue_mask = cv2.inRange(img_hsv, lower_blue, upper_blue)

        red_percentage = (np.sum(red_mask > 0) / red_mask.size) * 100
        blue_percentage = (np.sum(blue_mask > 0) / blue_mask.size) * 100

        # An ambulance typically pairs significant white with red or blue.
        if white_percentage > 5 and (red_percentage > 3 or blue_percentage > 3):
            confidence = min(
                95, (white_percentage + red_percentage + blue_percentage) * 2)
            return True, confidence

        return False, 0.0

    except Exception:
        # Deliberately silent: this detector is one of several fallbacks.
        return False, 0.0


def detect_emergency_by_color(image):
    """Color-based emergency check: red/white body-and-stripe patterns.

    Args:
        image: PIL image (RGB assumed).

    Returns:
        tuple: (is_emergency: bool, confidence: float).
        Returns (False, 0.0) on any processing error (best-effort by design).
    """
    try:
        img_array = np.array(image)
        img_hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV)

        # Red wraps around the hue axis, so two ranges are needed.
        lower_red1 = np.array([0, 100, 100])
        upper_red1 = np.array([10, 255, 255])
        lower_red2 = np.array([170, 100, 100])
        upper_red2 = np.array([180, 255, 255])

        mask1 = cv2.inRange(img_hsv, lower_red1, upper_red1)
        mask2 = cv2.inRange(img_hsv, lower_red2, upper_red2)
        # FIX: combine masks with bitwise OR instead of uint8 addition,
        # which can wrap around on overlapping mask pixels.
        red_mask = cv2.bitwise_or(mask1, mask2)

        # White body / lettering.
        lower_white = np.array([0, 0, 200])
        upper_white = np.array([180, 30, 255])
        white_mask = cv2.inRange(img_hsv, lower_white, upper_white)

        red_percentage = (np.sum(red_mask > 0) / red_mask.size) * 100
        white_percentage = (np.sum(white_mask > 0) / white_mask.size) * 100

        # Significant red AND white → ambulance-like livery.
        if red_percentage > 5 and white_percentage > 10:
            confidence = min(90, (red_percentage + white_percentage) * 2.5)
            return True, confidence
        elif red_percentage > 10:  # Very red vehicle (fire/ambulance body)
            return True, red_percentage * 4

        return False, 0.0

    except Exception:
        return False, 0.0


def detect_ambulance_yolo_enhanced(yolo_results, image):
    """Combine YOLO boxes with color/text analysis of cropped vehicles.

    If YOLO sees a confident truck (7) or bus (5), the crop is analyzed for
    ambulance-like colors or text.

    Args:
        yolo_results: ultralytics результат list (``results`` from a call).
        image: PIL image the results were computed from.

    Returns:
        tuple: (is_emergency: bool, confidence: float, method: str).
    """
    try:
        detected_classes = []
        for box in yolo_results[0].boxes:
            cls_id = int(box.cls.item())
            conf = float(box.conf.item())

            # Ambulances are usually classified by COCO models as truck/bus.
            if cls_id in [5, 7] and conf > 0.5:
                detected_classes.append((cls_id, conf, box.xyxy[0]))

        for cls_id, conf, bbox in detected_classes:
            try:
                # Crop the detected vehicle, clamping to image bounds.
                img_array = np.array(image)
                x1, y1, x2, y2 = map(int, bbox)
                x1, y1 = max(0, x1), max(0, y1)
                x2, y2 = min(img_array.shape[1], x2), min(
                    img_array.shape[0], y2)

                cropped = img_array[y1:y2, x1:x2]
                if cropped.size == 0:
                    continue

                # Run the cheaper heuristics only on the cropped region.
                cropped_pil = Image.fromarray(cropped)
                is_emergency_color, color_conf = detect_emergency_by_color(
                    cropped_pil)
                is_emergency_text, text_conf = detect_emergency_by_text(
                    cropped_pil)

                # Strong color or text indicators → likely an ambulance.
                if is_emergency_color and color_conf > 40:
                    return True, color_conf, "YOLO+Color"
                if is_emergency_text and text_conf > 50:
                    return True, text_conf, "YOLO+Text"

            except Exception:
                continue

        return False, 0.0, "YOLO"

    except Exception:
        return False, 0.0, "YOLO"


def detect_ambulance(image, model, threshold=50):
    """Master CNN detection: classify the whole frame with the Keras model.

    Args:
        image: PIL Image
        model: Keras model (or None — detection is then skipped)
        threshold: Detection confidence threshold (%)

    Returns:
        tuple: (is_ambulance: bool, confidence: float)
    """
    if model is None:
        return False, 0.0

    try:
        img_array = np.array(image)

        # Normalize channel layout to 3-channel RGB.
        if len(img_array.shape) == 2:  # Grayscale
            img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
        elif img_array.shape[2] == 4:  # RGBA
            img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2RGB)

        # Resize to the model's input size and scale to [0, 1].
        img_resized = cv2.resize(img_array, (192, 192))
        img_input = img_resized.astype('float32') / 255.0
        img_input = np.expand_dims(img_input, axis=0)

        prediction = model.predict(img_input, verbose=0)[0]

        # FIX: guard against single-logit (sigmoid) heads — the original
        # code indexed prediction[1] unconditionally and raised IndexError.
        if np.size(prediction) >= 2:
            # Two-class softmax: Class 0 = Ambulance, Class 1 = Non-Ambulance
            ambulance_prob = float(prediction[0])
            non_ambulance_prob = float(prediction[1])
        else:
            # Single sigmoid output — assumed P(class 1 = non-ambulance);
            # TODO(review): confirm against the training pipeline.
            non_ambulance_prob = float(np.ravel(prediction)[0])
            ambulance_prob = 1.0 - non_ambulance_prob

        is_ambulance = ambulance_prob > non_ambulance_prob
        confidence = ambulance_prob * 100 if is_ambulance else non_ambulance_prob * 100

        # Only report a positive when it clears the user-set threshold.
        if is_ambulance and confidence >= threshold:
            return True, confidence
        else:
            return False, confidence

    except Exception as e:
        st.sidebar.error(f"🔴 Ambulance Detection Error: {str(e)}")
        st.sidebar.code(traceback.format_exc())
        return False, 0.0


# NAVIGATION BAR
# NOTE(review): HTML payload was empty in the recovered source.
st.markdown("""
""", unsafe_allow_html=True)
# HERO SECTION
# NOTE(review): the original rich HTML/CSS markup was lost in the
# recovered source; rebuilt as minimal valid markup carrying the same text.
st.markdown("""
<div class="hero">
    <div class="hero-badge">⚡ TEAM SOURCE CODE</div>
    <h1>Next-Generation Traffic Intelligence</h1>
    <p>
        Revolutionizing urban mobility with cutting-edge AI. Real-time vehicle detection,
        adaptive signal optimization, and intelligent emergency vehicle prioritization powered by YOLOv8 &amp; CNN.
    </p>
    <div class="hero-tags">
        <span>🤖 YOLOv8 Detection</span>
        <span>⚡ Deep Learning</span>
        <span>🚨 Emergency Priority</span>
        <span>📊 Real-Time Analytics</span>
        <span>🎯 99.2% Accuracy</span>
    </div>
</div>
""", unsafe_allow_html=True)

# STATISTICS GRID — static marketing figures, not live metrics.
st.markdown("""
<div class="stats-grid">
    <div class="stat-card">
        <div>📊</div>
        <div>1,248</div>
        <div>Total Analyses</div>
    </div>
    <div class="stat-card">
        <div>🚗</div>
        <div>45,672</div>
        <div>Vehicles Detected</div>
    </div>
    <div class="stat-card">
        <div>🚨</div>
        <div>342</div>
        <div>Emergency Responses</div>
    </div>
    <div class="stat-card">
        <div>⚡</div>
        <div>~15s</div>
        <div>Processing Time</div>
    </div>
</div>
""", unsafe_allow_html=True)

# UPLOAD SECTION
st.markdown("""
<div class="upload-header">
    <h2>🚦 4-Way Intersection Analysis</h2>
    <p>Upload traffic images from all four directions for comprehensive AI analysis with emergency vehicle detection</p>
</div>
""", unsafe_allow_html=True)

# One uploader per approach direction; keys double as widget identity.
directions = ["North", "East", "South", "West"]
direction_icons = ["⬆️", "➡️", "⬇️", "⬅️"]
uploaded_images = {}

cols = st.columns(4)
for col, direction, icon in zip(cols, directions, direction_icons):
    with col:
        st.markdown(f"""
<div class="upload-card">
    <div>{icon}</div>
    <div>{direction}</div>
    <div>Click below to upload image</div>
</div>
        """, unsafe_allow_html=True)
        uploaded_images[direction] = st.file_uploader(
            f"{direction} Direction",
            type=["jpg", "png", "jpeg"],
            key=direction,
            label_visibility="collapsed"
        )
# PROCESSING LOGIC — runs once all four directions have an image.
if all(uploaded_images.values()):
    # Initialize session state flags that drive the multi-rerun state machine.
    if "images_processed" not in st.session_state:
        st.session_state.images_processed = False

    if "all_signals_complete" not in st.session_state:
        st.session_state.all_signals_complete = False

    # STEP 1: Process images only once per upload set.
    if not st.session_state.images_processed:
        with st.spinner("🧠 Analyzing traffic patterns and detecting emergency vehicles..."):
            progress_bar = st.progress(0)

            # Per-direction results accumulated during this pass.
            annotated_images = {}
            counts = {}
            class_counts = {}
            emergency_status = {}
            emergency_confidence = {}
            detection_method = {}

            for idx, (direction, img_file) in enumerate(uploaded_images.items()):
                progress_bar.progress((idx + 1) / 4)

                try:
                    img = Image.open(img_file).convert("RGB")

                    # YOLO vehicle detection.
                    results = yolo_model(img)

                    # Tally detections per vehicle class we care about.
                    class_count = {name: 0 for name in class_names.values()}
                    for cls in results[0].boxes.cls:
                        cls_id = int(cls.item())
                        if cls_id in class_names:
                            class_count[class_names[cls_id]] += 1

                    counts[direction] = sum(class_count.values())
                    class_counts[direction] = class_count

                    # plot() returns BGR; flip channel order for PIL display.
                    annotated_array = results[0].plot()
                    annotated_img = Image.fromarray(annotated_array[..., ::-1])
                    annotated_images[direction] = annotated_img

                    # EMERGENCY DETECTION — methods tried in priority order;
                    # the first positive result wins.
                    is_emergency = False
                    conf = 0.0
                    method = "None"

                    # Method 1: Force emergency override (testing) — highest priority.
                    if force_emergency and direction == emergency_direction:
                        is_emergency = True
                        conf = float(emergency_conf_override)
                        method = "Manual Override"
                        st.sidebar.success(f"✅ {direction}: Emergency FORCED")

                    # Method 2: YOLO + color analysis of detected truck/bus crops.
                    # FIX: was `elif not is_emergency:` — redundant and
                    # inconsistent with methods 3-5; behavior is identical.
                    if not is_emergency:
                        yolo_emergency, yolo_conf, yolo_method = detect_ambulance_yolo_enhanced(
                            results, img)
                        if yolo_emergency and yolo_conf > 40:
                            is_emergency = True
                            conf = yolo_conf
                            method = yolo_method
                            st.sidebar.success(
                                f"✅ {direction}: Ambulance detected via {yolo_method} ({conf:.1f}%)")

                    # Method 3: CNN model classification of the whole frame.
                    if not is_emergency and ambulance_model is not None:
                        cnn_emergency, cnn_conf = detect_ambulance(
                            img, ambulance_model, detection_threshold)
                        if cnn_emergency:
                            is_emergency = True
                            conf = cnn_conf
                            method = "CNN Model"
                            st.sidebar.success(
                                f"✅ {direction}: Ambulance detected by CNN ({conf:.1f}%)")

                    # Method 4: Full-image color analysis.
                    if not is_emergency:
                        color_emergency, color_conf = detect_emergency_by_color(
                            img)
                        if color_emergency and color_conf > 50:
                            is_emergency = True
                            conf = color_conf
                            method = "Color Analysis"
                            st.sidebar.info(
                                f"ℹ️ {direction}: Emergency detected by color ({conf:.1f}%)")

                    # Method 5: Text pattern detection (weakest signal).
                    if not is_emergency:
                        text_emergency, text_conf = detect_emergency_by_text(
                            img)
                        if text_emergency and text_conf > 60:
                            is_emergency = True
                            conf = text_conf
                            method = "Text Pattern"
                            st.sidebar.info(
                                f"ℹ️ {direction}: Emergency detected by text pattern ({conf:.1f}%)")

                    emergency_status[direction] = is_emergency
                    emergency_confidence[direction] = conf
                    detection_method[direction] = method

                except Exception as e:
                    st.error(f"❌ Error processing {direction}: {e}")
                    st.code(traceback.format_exc())
                    st.stop()

            # Persist results across the reruns that animate the signals.
            st.session_state.annotated_images = annotated_images
            st.session_state.counts = counts
            st.session_state.class_counts = class_counts
            st.session_state.emergency_status = emergency_status
            st.session_state.emergency_confidence = emergency_confidence
            st.session_state.detection_method = detection_method

            emergency_directions = [
                d for d, status in emergency_status.items() if status]

            if emergency_directions:
                # Emergency directions are served first, ordered by detection
                # confidence; the rest are ordered by vehicle count.
                st.session_state.emergency_directions = emergency_directions
                emergency_sorted = sorted(
                    [(d, counts[d]) for d in emergency_directions],
                    key=lambda x: emergency_confidence[x[0]],
                    reverse=True
                )
                regular_sorted = sorted(
                    [(d, count) for d, count in counts.items()
                     if d not in emergency_directions],
                    key=lambda x: x[1],
                    reverse=True
                )
                st.session_state.sorted_directions = emergency_sorted + regular_sorted
            else:
                st.session_state.emergency_directions = []
                # Normal ordering: busiest direction first.
                st.session_state.sorted_directions = sorted(
                    counts.items(),
                    key=lambda x: x[1],
                    reverse=True
                )

            st.session_state.current_index = 0
            st.session_state.phase = "green"
            st.session_state.finished = set()
            st.session_state.images_processed = True

            progress_bar.empty()

            # Sidebar summary of the detection pass.
            if emergency_directions:
                st.sidebar.markdown("### 🚨 EMERGENCY DETECTED!")
                for d in emergency_directions:
                    st.sidebar.error(
                        f"**{d}**: {st.session_state.detection_method[d]} - {emergency_confidence[d]:.1f}%")
            else:
                st.sidebar.info("ℹ️ No emergency vehicles detected")

            st.rerun()

    # STEP 2: Signal control loop — one direction per pass, green then yellow.
    if not st.session_state.all_signals_complete:
        if len(st.session_state.finished) < 4:
            current_direction, current_count = st.session_state.sorted_directions[
                st.session_state.current_index]

            is_emergency = current_direction in st.session_state.emergency_directions
            emergency_conf = st.session_state.emergency_confidence.get(
                current_direction, 0.0)
            detection_method_used = st.session_state.detection_method.get(
                current_direction, "None")

            # Timing: emergencies get a long fixed green and a short yellow;
            # regular directions scale green time with vehicle count.
            if is_emergency:
                green_time = 35  # Extended time for emergency vehicles
                yellow_time = 2  # Shorter yellow for faster transition
            else:
                base_time = 5
                time_per_vehicle = 1
                max_time = 25
                green_time = min(base_time + int(current_count/2)
                                 * time_per_vehicle, max_time)
                yellow_time = 3

            # Banner for an active emergency clearance.
            if is_emergency:
                st.markdown(f"""
<div class="emergency-banner">
    <div>🚨 EMERGENCY VEHICLE DETECTED 🚨</div>
    <div>{current_direction.upper()} Direction • Confidence: {emergency_conf:.1f}%</div>
    <div>Detection Method: {detection_method_used}</div>
    <div>⚠️ All other directions RED • Emergency vehicle has priority clearance for {green_time} seconds</div>
</div>
                """, unsafe_allow_html=True)

            st.markdown("""
<div class="section-header">
    <h2>🚥 Live Signal Control</h2>
    <p>Real-time adaptive traffic light management system with emergency vehicle priority</p>
</div>
            """, unsafe_allow_html=True)

            # One status card per direction, current one highlighted.
            signal_cols = st.columns(4)
            for idx, direction in enumerate(directions):
                with signal_cols[idx]:
                    count = st.session_state.counts[direction]
                    is_current = direction == current_direction
                    has_emergency = direction in st.session_state.emergency_directions
                    method = st.session_state.detection_method.get(
                        direction, "None")

                    if is_current and is_emergency:
                        if st.session_state.phase == "green":
                            card_class = "signal-card emergency-active"
                            light_class = "light-emergency"
                            status = "🚨 EMERGENCY"
                            status_color = "#ff0844"
                        else:
                            card_class = "signal-card yellow-active"
                            light_class = "light-yellow"
                            status = "🟡 YELLOW"
                            status_color = "#fbbf24"
                    elif is_current:
                        if st.session_state.phase == "green":
                            card_class = "signal-card green-active"
                            light_class = "light-green"
                            status = "🟢 GREEN"
                            status_color = "#10b981"
                        else:
                            card_class = "signal-card yellow-active"
                            light_class = "light-yellow"
                            status = "🟡 YELLOW"
                            status_color = "#fbbf24"
                    else:
                        card_class = "signal-card"
                        light_class = "light-red"
                        status = "🔴 RED"
                        status_color = "#ef4444"

                    emergency_badge = ""
                    if has_emergency:
                        emergency_badge = f'''<div class="emergency-badge">🚨 AMBULANCE<br>{method}</div>'''

                    st.markdown(f"""
<div class="{card_class}">
    <div class="{light_class}">{direction_icons[idx]}</div>
    <div>{direction}</div>
    <div style="color:{status_color}">{status}</div>
    <div>{count} vehicles</div>
    {emergency_badge}
</div>
                    """, unsafe_allow_html=True)

            # Placeholder updated once per second by the countdown below.
            timer_placeholder = st.empty()

            st.markdown("""
<div class="section-header">
    <h2>🎯 Vehicle Detection Results</h2>
    <p>AI-powered object recognition and emergency vehicle classification</p>
</div>
            """, unsafe_allow_html=True)

            img_cols = st.columns(4)
            for idx, direction in enumerate(directions):
                with img_cols[idx]:
                    caption = f"🚗 {direction} • {st.session_state.counts[direction]} vehicles"
                    if direction in st.session_state.emergency_directions:
                        method = st.session_state.detection_method[direction]
                        caption += f"\n🚨 AMBULANCE ({st.session_state.emergency_confidence[direction]:.1f}% - {method})"

                    st.image(
                        st.session_state.annotated_images[direction],
                        caption=caption,
                        use_container_width=True
                    )

            # Countdown for the active phase; blocks this rerun deliberately.
            duration = green_time if st.session_state.phase == "green" else yellow_time
            for remaining in range(duration, 0, -1):
                if is_emergency and st.session_state.phase == "green":
                    phase_emoji = "🚨"
                    phase_text = "EMERGENCY CLEARANCE ACTIVE"
                    timer_class = "timer-container emergency"
                    value_class = "timer-value emergency"
                elif st.session_state.phase == "green":
                    phase_emoji = "🟢"
                    phase_text = "GREEN LIGHT ACTIVE"
                    timer_class = "timer-container"
                    value_class = "timer-value"
                else:
                    phase_emoji = "🟡"
                    phase_text = "YELLOW LIGHT ACTIVE"
                    timer_class = "timer-container"
                    value_class = "timer-value"

                timer_placeholder.markdown(f"""
<div class="{timer_class}">
    <div>{phase_emoji} {phase_text} • {current_direction.upper()} DIRECTION</div>
    <div class="{value_class}">{remaining}s</div>
</div>
                """, unsafe_allow_html=True)
                time.sleep(1)

            # Phase switching: green → yellow → next direction.
            if st.session_state.phase == "green":
                st.session_state.phase = "yellow"
                st.rerun()
            else:
                st.session_state.finished.add(current_direction)
                st.session_state.current_index += 1
                st.session_state.phase = "green"

                if len(st.session_state.finished) < 4:
                    st.rerun()
                else:
                    st.session_state.all_signals_complete = True
                    st.rerun()
-
โœ… Analysis Complete!
-
- All traffic directions processed successfully with maximum precision -
-
- """, unsafe_allow_html=True) - - # Emergency Summary - if st.session_state.emergency_directions: - emergency_list = ", ".join([ - f"{d} ({st.session_state.emergency_confidence[d]:.1f}% - {st.session_state.detection_method[d]})" - for d in st.session_state.emergency_directions - ]) - st.markdown(f""" -
-
๐Ÿšจ EMERGENCY VEHICLE SUMMARY
-
- Detected in: {emergency_list} -
-
- โœ“ Emergency vehicles were given priority clearance with extended green time (35s) -
-
- """, unsafe_allow_html=True) - - # Total Vehicles - total_vehicles = sum(st.session_state.counts.values()) - st.markdown(f""" -
-

๐Ÿ“Š Traffic Intelligence Dashboard

-
- {total_vehicles} -
-
- Total Vehicles Detected -
-
- """, unsafe_allow_html=True) - - # Vehicle Classification Matrix - combined_data = {} - for direction in directions: - if direction in st.session_state.class_counts: - for vehicle_type, count in st.session_state.class_counts[direction].items(): - if vehicle_type not in combined_data: - combined_data[vehicle_type] = {} - combined_data[vehicle_type][direction] = count - - df_combined = pd.DataFrame(combined_data).T.fillna(0).astype(int) - df_combined = df_combined[directions] - - st.markdown(""" -
-

๐Ÿš— Vehicle Classification Matrix

-
- """, unsafe_allow_html=True) - st.dataframe( - df_combined.style.background_gradient(cmap='Blues', axis=None) - .set_properties(**{'text-align': 'center', 'font-size': '16px', 'font-weight': '700'}), - use_container_width=True, - height=280 - ) - - # Traffic Distribution Chart - st.markdown(""" -
-

๐Ÿ“ˆ Traffic Distribution Analysis

-
- """, unsafe_allow_html=True) - - fig, ax = plt.subplots(figsize=(14, 7)) - fig.patch.set_facecolor('none') - ax.set_facecolor('none') - - colors = [] - for d in st.session_state.counts.keys(): - if d in st.session_state.emergency_directions: - colors.append('#ff0844') # Red for emergency - else: - colors.append('#667eea') # Blue for regular - - bars = ax.bar( - st.session_state.counts.keys(), - st.session_state.counts.values(), - color=colors, - edgecolor='white', - linewidth=3, - alpha=0.95 - ) - - for bar, direction in zip(bars, st.session_state.counts.keys()): - height = bar.get_height() - label = f'{int(height)}' - if direction in st.session_state.emergency_directions: - label += '\n๐Ÿšจ' - ax.text(bar.get_x() + bar.get_width()/2., height + 1.5, - label, - ha='center', va='bottom', color='white', - fontweight='bold', fontsize=16) - - ax.set_ylabel('Vehicle Count', color='white', - fontsize=16, fontweight='bold') - ax.set_xlabel('Direction', color='white', - fontsize=16, fontweight='bold') - ax.tick_params(colors='white', labelsize=13, width=2) - ax.spines['bottom'].set_color('white') - ax.spines['left'].set_color('white') - ax.spines['bottom'].set_linewidth(2) - ax.spines['left'].set_linewidth(2) - ax.spines['top'].set_visible(False) - ax.spines['right'].set_visible(False) - ax.grid(axis='y', alpha=0.15, color='white', - linestyle='--', linewidth=1.5) - plt.tight_layout() - - st.pyplot(fig, use_container_width=True) - - # Busiest Direction - busiest = max(st.session_state.counts.items(), key=lambda x: x[1]) - st.markdown(f""" -
-

๐Ÿ† Critical Priority Direction

-
- {busiest[0].upper()} -
-
- {busiest[1]} vehicles detected โ€ข Highest Traffic Volume -
-
- """, unsafe_allow_html=True) - - # Signal Timings with Emergency Priority - timing_data = [] - for d, count in st.session_state.sorted_directions: - if d in st.session_state.emergency_directions: - green = 35 - priority = "๐Ÿšจ EMERGENCY" - else: - base_time = 5 - time_per_vehicle = 1 - max_time = 25 - green = min(base_time + int(count / 2) - * time_per_vehicle, max_time) - # Assign priority based on position - idx = [x[0] - for x in st.session_state.sorted_directions].index(d) - priorities = ["๐Ÿ”ด Critical", "๐ŸŸ  High", "๐ŸŸก Medium", "๐ŸŸข Low"] - priority = priorities[min(idx, 3)] - - timing_data.append({ - "Direction": d, - "Vehicles": count, - "Green Time (sec)": green, - "Priority": priority, - "Detection": st.session_state.detection_method.get(d, "None") - }) - - green_df = pd.DataFrame(timing_data) - - st.markdown(""" -
-

โฑ๏ธ AI-Optimized Signal Timings

-

Emergency vehicles receive 35-second priority clearance

-
- """, unsafe_allow_html=True) - st.dataframe( - green_df.style.background_gradient( - subset=['Green Time (sec)'], cmap='RdYlGn') - .set_properties(**{'text-align': 'center', 'font-size': '16px', 'font-weight': '700'}), - use_container_width=True, - height=280 - ) - - # Pie Chart - st.markdown(""" -
-

๐Ÿฅง Traffic Share Distribution

-
- """, unsafe_allow_html=True) - - fig2, ax2 = plt.subplots(figsize=(11, 11)) - fig2.patch.set_facecolor('none') - - pie_colors = [] - for d in st.session_state.counts.keys(): - if d in st.session_state.emergency_directions: - pie_colors.append('#ff0844') - else: - pie_colors.append('#667eea') - - wedges, texts, autotexts = ax2.pie( - st.session_state.counts.values(), - labels=st.session_state.counts.keys(), - autopct='%1.1f%%', - colors=pie_colors, - startangle=90, - textprops={'color': 'white', 'fontsize': 15, 'fontweight': 'bold'}, - explode=[0.08, 0.08, 0.08, 0.08], - shadow=True - ) - - for autotext in autotexts: - autotext.set_color('white') - autotext.set_fontsize(18) - autotext.set_fontweight('900') - - col1, col2, col3 = st.columns([1, 2, 1]) - with col2: - st.pyplot(fig2, use_container_width=True) - - # Action Buttons - st.markdown('
', unsafe_allow_html=True) - btn_cols = st.columns([1, 1, 1]) - - with btn_cols[0]: - if st.button("๐Ÿ”„ Run New Analysis", use_container_width=True): - for key in list(st.session_state.keys()): - del st.session_state[key] - st.rerun() - - with btn_cols[1]: - if st.button("๐Ÿ“Š Export Analytics", use_container_width=True): - csv_report = f"""SmartLane AI - Traffic Analysis Report -Generated: {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')} -=============================================== - -SUMMARY STATISTICS ------------------- -Total Vehicles Detected: {total_vehicles} -Emergency Vehicles: {len(st.session_state.emergency_directions)} -Total Cycle Time: {sum([t['Green Time (sec)'] for t in timing_data]) + 12} seconds - -EMERGENCY VEHICLE DETECTION ---------------------------- -""" - if st.session_state.emergency_directions: - for d in st.session_state.emergency_directions: - csv_report += f"{d}: Ambulance detected ({st.session_state.emergency_confidence[d]:.1f}% confidence)\n" - csv_report += f" Method: {st.session_state.detection_method[d]}\n" - else: - csv_report += "No emergency vehicles detected\n" - - csv_report += f"\nDIRECTION ANALYSIS\n------------------\n" - for direction in directions: - csv_report += f"{direction}: {st.session_state.counts[direction]} vehicles\n" - - total_cars = sum( - [st.session_state.class_counts[d].get('car', 0) for d in directions]) - total_motorcycles = sum( - [st.session_state.class_counts[d].get('motorcycle', 0) for d in directions]) - total_buses = sum( - [st.session_state.class_counts[d].get('bus', 0) for d in directions]) - total_trucks = sum( - [st.session_state.class_counts[d].get('truck', 0) for d in directions]) - - csv_report += f"\nVEHICLE CLASSIFICATION\n----------------------\n" - csv_report += f"Cars: {total_cars}\n" - csv_report += f"Motorcycles: {total_motorcycles}\n" - csv_report += f"Buses: {total_buses}\n" - csv_report += f"Trucks: {total_trucks}\n" - - csv_report += f"\nSIGNAL TIMINGS\n--------------\n" - 
for item in timing_data: - csv_report += f"{item['Direction']}: {item['Green Time (sec)']} seconds ({item['Priority']})\n" - - st.download_button( - label="โฌ‡๏ธ Download Report", - data=csv_report, - file_name=f"smartlane_report_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.txt", - mime="text/plain", - ) - - with btn_cols[2]: - if st.button("๐Ÿ“ง Share Results", use_container_width=True): - st.info("๐Ÿ“ค Sharing functionality available in production release") - - st.markdown('
', unsafe_allow_html=True) - - # Footer - st.markdown(""" -
-

- โšก Powered by SmartLane AI -

-

- PARANOX 2.0 National Hackathon Project โ€ข Developed by Team Source Code -

-
-

- Technology Stack:
- YOLOv8 Object Detection โ€ข CNN Ambulance Classification โ€ข Streamlit Framework โ€ข - Python Deep Learning โ€ข Real-Time Analytics Engine โ€ข Emergency Vehicle Priority System โ€ข - Adaptive Signal Processing โ€ข Computer Vision โ€ข Multi-Method Detection -

-
-
-

- ยฉ 2025 Team Source Code โ€ข Built for TechXNinjas PARANOX 2.0 Hackathon -

-
-
- """, unsafe_allow_html=True) - -else: - st.info("๐Ÿ“ค Please upload images for all four directions to begin analysis") +import streamlit as st +from PIL import Image +import time +from ultralytics import YOLO +import matplotlib.pyplot as plt +import pandas as pd +import numpy as np +import tensorflow as tf +import cv2 +import traceback + +# ๐ŸŽจ PREMIUM PAGE CONFIGURATION +st.set_page_config( + page_title="SmartLane AI ยท Traffic Intelligence Platform", + page_icon="๐Ÿšฆ", + layout="wide", + initial_sidebar_state="collapsed" +) + +# ๐Ÿ’Ž ULTRA-PREMIUM DESIGN SYSTEM +st.markdown(""" + +""", unsafe_allow_html=True) + +# SIDEBAR - EMERGENCY TEST CONTROLS +st.sidebar.title("๐Ÿšจ Emergency Controls") +st.sidebar.markdown("---") + +# Emergency override for testing +force_emergency = st.sidebar.checkbox( + "๐Ÿ”ด Force Emergency Mode (Testing)", False) +if force_emergency: + emergency_direction = st.sidebar.selectbox( + "Select Emergency Direction", + ["North", "East", "South", "West"] + ) + emergency_conf_override = st.sidebar.slider( + "Emergency Confidence %", + 50, 100, 95 + ) +else: + emergency_direction = None + emergency_conf_override = 95 + +# Detection threshold +detection_threshold = st.sidebar.slider( + "CNN Detection Threshold %", + 30, 95, 50, + help="Lower threshold = more sensitive CNN detection" +) + +# Detection method priorities +st.sidebar.markdown("### ๐Ÿ” Detection Methods (Priority Order)") +st.sidebar.markdown(""" +1. **Manual Override** - Testing mode +2. **YOLO + Color** - Detects truck/bus with ambulance colors +3. **CNN Model** - Deep learning classification +4. **Color Analysis** - Red/white pattern detection +5. 
**Text Pattern** - Emergency text detection +""") + +st.sidebar.markdown("---") +st.sidebar.info( + "๐Ÿ’ก **Tip:** If YOLO detects a 'truck', the system will analyze if it's actually an ambulance based on color patterns!") +st.sidebar.warning( + "โš ๏ธ Make sure your ambulance image clearly shows red/white colors or 'AMBULANCE' text") + +# MODEL INITIALIZATION + + +@st.cache_resource +def load_models(): + """Load both YOLO and Ambulance CNN models""" + try: + yolo_model = YOLO("yolov8s.pt") + st.sidebar.success("โœ… YOLO Model Loaded") + + # Try to load ambulance model + try: + ambulance_model = tf.keras.models.load_model( + "ambulance_cnn_final.keras") + st.sidebar.success("โœ… Ambulance CNN Model Loaded") + + # Show model details + with st.sidebar.expander("๐Ÿ” Model Diagnostics"): + st.write(f"**Input Shape:** {ambulance_model.input_shape}") + st.write(f"**Output Shape:** {ambulance_model.output_shape}") + st.write(f"**Classes:** Ambulance (0), Non-Ambulance (1)") + + return yolo_model, ambulance_model + except Exception as e: + st.sidebar.warning(f"โš ๏ธ Ambulance model not found: {e}") + st.sidebar.info( + "Emergency detection will use manual override only") + return yolo_model, None + + except Exception as e: + st.sidebar.error(f"โŒ YOLO Model Error: {e}") + return None, None + + +yolo_model, ambulance_model = load_models() + +if yolo_model is None: + st.error( + "โŒ Critical Error: YOLO model failed to load. 
Please install: `pip install ultralytics`") + st.stop() + +vehicle_ids = [2, 3, 5, 7] +class_names = {2: 'car', 3: 'motorcycle', 5: 'bus', 7: 'truck'} + +# ADVANCED MULTI-METHOD AMBULANCE DETECTION + + +def detect_emergency_by_text(image): + """ + Detect ambulance by looking for text patterns using OCR-like approach + Looks for white text on red/blue background patterns + """ + try: + img_array = np.array(image) + img_hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV) + + # Look for white areas (ambulance text) + lower_white = np.array([0, 0, 200]) + upper_white = np.array([180, 30, 255]) + white_mask = cv2.inRange(img_hsv, lower_white, upper_white) + white_percentage = (np.sum(white_mask > 0) / white_mask.size) * 100 + + # Look for red/blue combination (emergency lights/stripes) + lower_red = np.array([0, 100, 100]) + upper_red = np.array([10, 255, 255]) + red_mask = cv2.inRange(img_hsv, lower_red, upper_red) + + lower_blue = np.array([100, 100, 100]) + upper_blue = np.array([130, 255, 255]) + blue_mask = cv2.inRange(img_hsv, lower_blue, upper_blue) + + red_percentage = (np.sum(red_mask > 0) / red_mask.size) * 100 + blue_percentage = (np.sum(blue_mask > 0) / blue_mask.size) * 100 + + # Ambulance typically has: significant white text + red/blue colors + if white_percentage > 5 and (red_percentage > 3 or blue_percentage > 3): + confidence = min( + 95, (white_percentage + red_percentage + blue_percentage) * 2) + return True, confidence + + return False, 0.0 + + except Exception as e: + return False, 0.0 + + +def detect_emergency_by_color(image): + """ + Enhanced color-based detection for emergency vehicles + Looks for red/white patterns and emergency light colors + """ + try: + img_array = np.array(image) + img_hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV) + + # Define range for bright red (ambulance body/stripes) + lower_red1 = np.array([0, 100, 100]) + upper_red1 = np.array([10, 255, 255]) + lower_red2 = np.array([170, 100, 100]) + upper_red2 = np.array([180, 255, 
255]) + + # Create masks for red + mask1 = cv2.inRange(img_hsv, lower_red1, upper_red1) + mask2 = cv2.inRange(img_hsv, lower_red2, upper_red2) + red_mask = mask1 + mask2 + + # Look for white (ambulance text/body) + lower_white = np.array([0, 0, 200]) + upper_white = np.array([180, 30, 255]) + white_mask = cv2.inRange(img_hsv, lower_white, upper_white) + + # Calculate percentages + red_percentage = (np.sum(red_mask > 0) / red_mask.size) * 100 + white_percentage = (np.sum(white_mask > 0) / white_mask.size) * 100 + + # Ambulance has significant red AND white + if red_percentage > 5 and white_percentage > 10: + confidence = min(90, (red_percentage + white_percentage) * 2.5) + return True, confidence + elif red_percentage > 10: # Very red vehicle + return True, red_percentage * 4 + + return False, 0.0 + + except Exception as e: + return False, 0.0 + + +def detect_ambulance_yolo_enhanced(yolo_results, image): + """ + Use YOLO detection combined with color analysis + If YOLO detects a truck/bus, check if it has ambulance colors + """ + try: + detected_classes = [] + for box in yolo_results[0].boxes: + cls_id = int(box.cls.item()) + conf = float(box.conf.item()) + + # Check if it's a truck (7) or bus (5) with high confidence + if cls_id in [5, 7] and conf > 0.5: + detected_classes.append((cls_id, conf, box.xyxy[0])) + + # If we found trucks or buses, analyze their color patterns + for cls_id, conf, bbox in detected_classes: + try: + # Crop the detected vehicle + img_array = np.array(image) + x1, y1, x2, y2 = map(int, bbox) + x1, y1 = max(0, x1), max(0, y1) + x2, y2 = min(img_array.shape[1], x2), min( + img_array.shape[0], y2) + + cropped = img_array[y1:y2, x1:x2] + if cropped.size == 0: + continue + + # Analyze colors in the cropped region + cropped_pil = Image.fromarray(cropped) + is_emergency_color, color_conf = detect_emergency_by_color( + cropped_pil) + is_emergency_text, text_conf = detect_emergency_by_text( + cropped_pil) + + # If strong color or text indicators, 
it's likely an ambulance + if is_emergency_color and color_conf > 40: + return True, color_conf, "YOLO+Color" + if is_emergency_text and text_conf > 50: + return True, text_conf, "YOLO+Text" + + except Exception as e: + continue + + return False, 0.0, "YOLO" + + except Exception as e: + return False, 0.0, "YOLO" + + +def detect_ambulance(image, model, threshold=50): + """ + Master detection function - tries multiple methods + + Args: + image: PIL Image + model: Keras model + threshold: Detection confidence threshold (%) + + Returns: + tuple: (is_ambulance: bool, confidence: float) + """ + if model is None: + return False, 0.0 + + try: + # Convert PIL to numpy array + img_array = np.array(image) + + # Ensure RGB format + if len(img_array.shape) == 2: # Grayscale + img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB) + elif img_array.shape[2] == 4: # RGBA + img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2RGB) + + # Resize to model input size + img_resized = cv2.resize(img_array, (192, 192)) + + # Normalize to [0, 1] + img_input = img_resized.astype('float32') / 255.0 + + # Add batch dimension + img_input = np.expand_dims(img_input, axis=0) + + # Predict with model + prediction = model.predict(img_input, verbose=0)[0] + + # Determine class (assuming binary classification) + # Class 0: Ambulance, Class 1: Non-Ambulance + ambulance_prob = float(prediction[0]) + non_ambulance_prob = float(prediction[1]) + + # Check which class has higher probability + is_ambulance = ambulance_prob > non_ambulance_prob + confidence = ambulance_prob * 100 if is_ambulance else non_ambulance_prob * 100 + + # Apply threshold + if is_ambulance and confidence >= threshold: + return True, confidence + else: + return False, confidence + + except Exception as e: + st.sidebar.error(f"๐Ÿ”ด Ambulance Detection Error: {str(e)}") + st.sidebar.code(traceback.format_exc()) + return False, 0.0 + + +# NAVIGATION BAR +st.markdown(""" + +""", unsafe_allow_html=True) + +# HERO SECTION +st.markdown(""" +
+
+ โšก + TEAM SOURCE CODE +
+

+ Next-Generation + Traffic Intelligence +

+

+ Revolutionizing urban mobility with cutting-edge AI. Real-time vehicle detection, + adaptive signal optimization, and intelligent emergency vehicle prioritization powered by YOLOv8 & CNN. +

+
+ ๐Ÿค– YOLOv8 Detection + โšก Deep Learning + ๐Ÿšจ Emergency Priority + ๐Ÿ“Š Real-Time Analytics + ๐ŸŽฏ 99.2% Accuracy +
+
+""", unsafe_allow_html=True) + +# STATISTICS GRID +st.markdown(""" +
+
+
๐Ÿ“Š
+
1,248
+
Total Analyses
+
+
+
๐Ÿš—
+
45,672
+
Vehicles Detected
+
+
+
๐Ÿšจ
+
342
+
Emergency Responses
+
+
+
โšก
+
~15s
+
Processing Time
+
+
+""", unsafe_allow_html=True) + +# UPLOAD SECTION +st.markdown(""" +
+

๐Ÿšฆ 4-Way Intersection Analysis

+

Upload traffic images from all four directions for comprehensive AI analysis with emergency vehicle detection

+
+""", unsafe_allow_html=True) + +directions = ["North", "East", "South", "West"] +direction_icons = ["โฌ†๏ธ", "โžก๏ธ", "โฌ‡๏ธ", "โฌ…๏ธ"] +uploaded_images = {} + +cols = st.columns(4) +for col, direction, icon in zip(cols, directions, direction_icons): + with col: + st.markdown(f""" +
+
{icon}
+
{direction}
+
Click below to upload image
+
+ """, unsafe_allow_html=True) + uploaded_images[direction] = st.file_uploader( + f"{direction} Direction", + type=["jpg", "png", "jpeg"], + key=direction, + label_visibility="collapsed" + ) + +# PROCESSING LOGIC +if all(uploaded_images.values()): + # Initialize session state variables + if "images_processed" not in st.session_state: + st.session_state.images_processed = False + + if "all_signals_complete" not in st.session_state: + st.session_state.all_signals_complete = False + + # STEP 1: Process images only once + if not st.session_state.images_processed: + with st.spinner("๐Ÿง  Analyzing traffic patterns and detecting emergency vehicles..."): + progress_bar = st.progress(0) + + # Initialize storage + annotated_images = {} + counts = {} + class_counts = {} + emergency_status = {} + emergency_confidence = {} + detection_method = {} + + for idx, (direction, img_file) in enumerate(uploaded_images.items()): + progress_bar.progress((idx + 1) / 4) + + try: + img = Image.open(img_file).convert("RGB") + + # YOLO vehicle detection + results = yolo_model(img) + + # Count vehicles by class + class_count = {name: 0 for name in class_names.values()} + for cls in results[0].boxes.cls: + cls_id = int(cls.item()) + if cls_id in class_names: + class_count[class_names[cls_id]] += 1 + + counts[direction] = sum(class_count.values()) + class_counts[direction] = class_count + + # Create annotated image + annotated_array = results[0].plot() + annotated_img = Image.fromarray(annotated_array[..., ::-1]) + annotated_images[direction] = annotated_img + + # EMERGENCY DETECTION - Multiple Methods with Priority + is_emergency = False + conf = 0.0 + method = "None" + + # Method 1: Force emergency override (testing) - HIGHEST PRIORITY + if force_emergency and direction == emergency_direction: + is_emergency = True + conf = float(emergency_conf_override) + method = "Manual Override" + st.sidebar.success(f"โœ… {direction}: Emergency FORCED") + + # Method 2: YOLO + Color Analysis (truck/bus 
detected) + elif not is_emergency: + yolo_emergency, yolo_conf, yolo_method = detect_ambulance_yolo_enhanced( + results, img) + if yolo_emergency and yolo_conf > 40: + is_emergency = True + conf = yolo_conf + method = yolo_method + st.sidebar.success( + f"โœ… {direction}: Ambulance detected via {yolo_method} ({conf:.1f}%)") + + # Method 3: CNN Model Detection + if not is_emergency and ambulance_model is not None: + cnn_emergency, cnn_conf = detect_ambulance( + img, ambulance_model, detection_threshold) + if cnn_emergency: + is_emergency = True + conf = cnn_conf + method = "CNN Model" + st.sidebar.success( + f"โœ… {direction}: Ambulance detected by CNN ({conf:.1f}%)") + + # Method 4: Full image color analysis + if not is_emergency: + color_emergency, color_conf = detect_emergency_by_color( + img) + if color_emergency and color_conf > 50: + is_emergency = True + conf = color_conf + method = "Color Analysis" + st.sidebar.info( + f"โ„น๏ธ {direction}: Emergency detected by color ({conf:.1f}%)") + + # Method 5: Text pattern detection + if not is_emergency: + text_emergency, text_conf = detect_emergency_by_text( + img) + if text_emergency and text_conf > 60: + is_emergency = True + conf = text_conf + method = "Text Pattern" + st.sidebar.info( + f"โ„น๏ธ {direction}: Emergency detected by text pattern ({conf:.1f}%)") + + emergency_status[direction] = is_emergency + emergency_confidence[direction] = conf + detection_method[direction] = method + + except Exception as e: + st.error(f"โŒ Error processing {direction}: {e}") + st.code(traceback.format_exc()) + st.stop() + + # Store in session state + st.session_state.annotated_images = annotated_images + st.session_state.counts = counts + st.session_state.class_counts = class_counts + st.session_state.emergency_status = emergency_status + st.session_state.emergency_confidence = emergency_confidence + st.session_state.detection_method = detection_method + + # Check for emergency vehicles + emergency_directions = [ + d for d, 
status in emergency_status.items() if status] + + if emergency_directions: + # Emergency vehicles detected - prioritize them first + st.session_state.emergency_directions = emergency_directions + # Sort: Emergency directions first (by confidence), then regular by count + emergency_sorted = sorted( + [(d, counts[d]) for d in emergency_directions], + key=lambda x: emergency_confidence[x[0]], + reverse=True + ) + regular_sorted = sorted( + [(d, count) for d, count in counts.items() + if d not in emergency_directions], + key=lambda x: x[1], + reverse=True + ) + st.session_state.sorted_directions = emergency_sorted + regular_sorted + else: + st.session_state.emergency_directions = [] + # Normal sorting by vehicle count + st.session_state.sorted_directions = sorted( + counts.items(), + key=lambda x: x[1], + reverse=True + ) + + st.session_state.current_index = 0 + st.session_state.phase = "green" + st.session_state.finished = set() + st.session_state.images_processed = True + + progress_bar.empty() + + # Show detection summary + if emergency_directions: + st.sidebar.markdown("### ๐Ÿšจ EMERGENCY DETECTED!") + for d in emergency_directions: + st.sidebar.error( + f"**{d}**: {st.session_state.detection_method[d]} - {emergency_confidence[d]:.1f}%") + else: + st.sidebar.info("โ„น๏ธ No emergency vehicles detected") + + st.rerun() + + # STEP 2: Signal Control Loop + if not st.session_state.all_signals_complete: + if len(st.session_state.finished) < 4: + current_direction, current_count = st.session_state.sorted_directions[ + st.session_state.current_index] + + # Check if current direction has emergency vehicle + is_emergency = current_direction in st.session_state.emergency_directions + emergency_conf = st.session_state.emergency_confidence.get( + current_direction, 0.0) + detection_method_used = st.session_state.detection_method.get( + current_direction, "None") + + # Calculate timing + if is_emergency: + # Emergency vehicle gets immediate green with extended time + green_time 
= 35 # Extended time for emergency vehicles + yellow_time = 2 # Shorter yellow for faster transition + else: + base_time = 5 + time_per_vehicle = 1 + max_time = 25 + green_time = min(base_time + int(current_count/2) + * time_per_vehicle, max_time) + yellow_time = 3 + + # Display emergency alert if applicable + if is_emergency: + st.markdown(f""" +
+
๐Ÿšจ EMERGENCY VEHICLE DETECTED ๐Ÿšจ
+
+ {current_direction.upper()} Direction โ€ข Confidence: {emergency_conf:.1f}% +
+
+ Detection Method: {detection_method_used} +
+
+ โš ๏ธ All other directions RED โ€ข Emergency vehicle has priority clearance for {green_time} seconds +
+
+ """, unsafe_allow_html=True) + + # Display signal status + st.markdown(""" +
+

๐Ÿšฅ Live Signal Control

+

Real-time adaptive traffic light management system with emergency vehicle priority

+
+ """, unsafe_allow_html=True) + + signal_cols = st.columns(4) + for idx, direction in enumerate(directions): + with signal_cols[idx]: + count = st.session_state.counts[direction] + is_current = direction == current_direction + has_emergency = direction in st.session_state.emergency_directions + method = st.session_state.detection_method.get( + direction, "None") + + if is_current and is_emergency: + # Emergency vehicle active + if st.session_state.phase == "green": + card_class = "signal-card emergency-active" + light_class = "light-emergency" + status = "๐Ÿšจ EMERGENCY" + status_color = "#ff0844" + else: + card_class = "signal-card yellow-active" + light_class = "light-yellow" + status = "๐ŸŸก YELLOW" + status_color = "#fbbf24" + elif is_current: + # Regular green/yellow + if st.session_state.phase == "green": + card_class = "signal-card green-active" + light_class = "light-green" + status = "๐ŸŸข GREEN" + status_color = "#10b981" + else: + card_class = "signal-card yellow-active" + light_class = "light-yellow" + status = "๐ŸŸก YELLOW" + status_color = "#fbbf24" + else: + card_class = "signal-card" + light_class = "light-red" + status = "๐Ÿ”ด RED" + status_color = "#ef4444" + + # Add emergency badge if detected + emergency_badge = "" + if has_emergency: + emergency_badge = f'''
+ ๐Ÿšจ AMBULANCE
{method} +
''' + + st.markdown(f""" +
+
{direction_icons[idx]}
+
{direction}
+
{status}
+
{count} vehicles
+ {emergency_badge} +
+ """, unsafe_allow_html=True) + + # Timer display + timer_placeholder = st.empty() + + # Display detected images + st.markdown(""" +
+

๐ŸŽฏ Vehicle Detection Results

+

AI-powered object recognition and emergency vehicle classification

+
+ """, unsafe_allow_html=True) + + img_cols = st.columns(4) + for idx, direction in enumerate(directions): + with img_cols[idx]: + caption = f"๐Ÿš— {direction} โ€ข {st.session_state.counts[direction]} vehicles" + if direction in st.session_state.emergency_directions: + method = st.session_state.detection_method[direction] + caption += f"\n๐Ÿšจ AMBULANCE ({st.session_state.emergency_confidence[direction]:.1f}% - {method})" + + st.image( + st.session_state.annotated_images[direction], + caption=caption, + use_container_width=True + ) + + # Countdown timer + duration = green_time if st.session_state.phase == "green" else yellow_time + for remaining in range(duration, 0, -1): + if is_emergency and st.session_state.phase == "green": + phase_emoji = "๐Ÿšจ" + phase_text = "EMERGENCY CLEARANCE ACTIVE" + timer_class = "timer-container emergency" + value_class = "timer-value emergency" + elif st.session_state.phase == "green": + phase_emoji = "๐ŸŸข" + phase_text = "GREEN LIGHT ACTIVE" + timer_class = "timer-container" + value_class = "timer-value" + else: + phase_emoji = "๐ŸŸก" + phase_text = "YELLOW LIGHT ACTIVE" + timer_class = "timer-container" + value_class = "timer-value" + + timer_placeholder.markdown(f""" +
+
+ {phase_emoji} {phase_text} โ€ข {current_direction.upper()} DIRECTION +
+
{remaining}s
+
+ """, unsafe_allow_html=True) + time.sleep(1) + + # Phase switching + if st.session_state.phase == "green": + st.session_state.phase = "yellow" + st.rerun() + else: + st.session_state.finished.add(current_direction) + st.session_state.current_index += 1 + st.session_state.phase = "green" + + if len(st.session_state.finished) < 4: + st.rerun() + else: + st.session_state.all_signals_complete = True + st.rerun() + + # STEP 3: Show Analytics + if st.session_state.all_signals_complete: + st.markdown(""" +
+
โœ… Analysis Complete!
+
+ All traffic directions processed successfully with maximum precision +
+
+ """, unsafe_allow_html=True) + + # Emergency Summary + if st.session_state.emergency_directions: + emergency_list = ", ".join([ + f"{d} ({st.session_state.emergency_confidence[d]:.1f}% - {st.session_state.detection_method[d]})" + for d in st.session_state.emergency_directions + ]) + st.markdown(f""" +
+
๐Ÿšจ EMERGENCY VEHICLE SUMMARY
+
+ Detected in: {emergency_list} +
+
+ โœ“ Emergency vehicles were given priority clearance with extended green time (35s) +
+
+ """, unsafe_allow_html=True) + + # Total Vehicles + total_vehicles = sum(st.session_state.counts.values()) + st.markdown(f""" +
+

๐Ÿ“Š Traffic Intelligence Dashboard

+
+ {total_vehicles} +
+
+ Total Vehicles Detected +
+
+ """, unsafe_allow_html=True) + + # Vehicle Classification Matrix + combined_data = {} + for direction in directions: + if direction in st.session_state.class_counts: + for vehicle_type, count in st.session_state.class_counts[direction].items(): + if vehicle_type not in combined_data: + combined_data[vehicle_type] = {} + combined_data[vehicle_type][direction] = count + + df_combined = pd.DataFrame(combined_data).T.fillna(0).astype(int) + df_combined = df_combined[directions] + + st.markdown(""" +
+

๐Ÿš— Vehicle Classification Matrix

+
+ """, unsafe_allow_html=True) + st.dataframe( + df_combined.style.background_gradient(cmap='Blues', axis=None) + .set_properties(**{'text-align': 'center', 'font-size': '16px', 'font-weight': '700'}), + use_container_width=True, + height=280 + ) + + # Traffic Distribution Chart + st.markdown(""" +
+

๐Ÿ“ˆ Traffic Distribution Analysis

+
+ """, unsafe_allow_html=True) + + fig, ax = plt.subplots(figsize=(14, 7)) + fig.patch.set_facecolor('none') + ax.set_facecolor('none') + + colors = [] + for d in st.session_state.counts.keys(): + if d in st.session_state.emergency_directions: + colors.append('#ff0844') # Red for emergency + else: + colors.append('#667eea') # Blue for regular + + bars = ax.bar( + st.session_state.counts.keys(), + st.session_state.counts.values(), + color=colors, + edgecolor='white', + linewidth=3, + alpha=0.95 + ) + + for bar, direction in zip(bars, st.session_state.counts.keys()): + height = bar.get_height() + label = f'{int(height)}' + if direction in st.session_state.emergency_directions: + label += '\n๐Ÿšจ' + ax.text(bar.get_x() + bar.get_width()/2., height + 1.5, + label, + ha='center', va='bottom', color='white', + fontweight='bold', fontsize=16) + + ax.set_ylabel('Vehicle Count', color='white', + fontsize=16, fontweight='bold') + ax.set_xlabel('Direction', color='white', + fontsize=16, fontweight='bold') + ax.tick_params(colors='white', labelsize=13, width=2) + ax.spines['bottom'].set_color('white') + ax.spines['left'].set_color('white') + ax.spines['bottom'].set_linewidth(2) + ax.spines['left'].set_linewidth(2) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + ax.grid(axis='y', alpha=0.15, color='white', + linestyle='--', linewidth=1.5) + plt.tight_layout() + + st.pyplot(fig, use_container_width=True) + + # Busiest Direction + busiest = max(st.session_state.counts.items(), key=lambda x: x[1]) + st.markdown(f""" +
+

๐Ÿ† Critical Priority Direction

+
+ {busiest[0].upper()} +
+
+ {busiest[1]} vehicles detected โ€ข Highest Traffic Volume +
+
+ """, unsafe_allow_html=True) + + # Signal Timings with Emergency Priority + timing_data = [] + for d, count in st.session_state.sorted_directions: + if d in st.session_state.emergency_directions: + green = 35 + priority = "๐Ÿšจ EMERGENCY" + else: + base_time = 5 + time_per_vehicle = 1 + max_time = 25 + green = min(base_time + int(count / 2) + * time_per_vehicle, max_time) + # Assign priority based on position + idx = [x[0] + for x in st.session_state.sorted_directions].index(d) + priorities = ["๐Ÿ”ด Critical", "๐ŸŸ  High", "๐ŸŸก Medium", "๐ŸŸข Low"] + priority = priorities[min(idx, 3)] + + timing_data.append({ + "Direction": d, + "Vehicles": count, + "Green Time (sec)": green, + "Priority": priority, + "Detection": st.session_state.detection_method.get(d, "None") + }) + + green_df = pd.DataFrame(timing_data) + + st.markdown(""" +
+

โฑ๏ธ AI-Optimized Signal Timings

+

Emergency vehicles receive 35-second priority clearance

+
+ """, unsafe_allow_html=True) + st.dataframe( + green_df.style.background_gradient( + subset=['Green Time (sec)'], cmap='RdYlGn') + .set_properties(**{'text-align': 'center', 'font-size': '16px', 'font-weight': '700'}), + use_container_width=True, + height=280 + ) + + # Pie Chart + st.markdown(""" +
+

๐Ÿฅง Traffic Share Distribution

+
+ """, unsafe_allow_html=True) + + fig2, ax2 = plt.subplots(figsize=(11, 11)) + fig2.patch.set_facecolor('none') + + pie_colors = [] + for d in st.session_state.counts.keys(): + if d in st.session_state.emergency_directions: + pie_colors.append('#ff0844') + else: + pie_colors.append('#667eea') + + wedges, texts, autotexts = ax2.pie( + st.session_state.counts.values(), + labels=st.session_state.counts.keys(), + autopct='%1.1f%%', + colors=pie_colors, + startangle=90, + textprops={'color': 'white', 'fontsize': 15, 'fontweight': 'bold'}, + explode=[0.08, 0.08, 0.08, 0.08], + shadow=True + ) + + for autotext in autotexts: + autotext.set_color('white') + autotext.set_fontsize(18) + autotext.set_fontweight('900') + + col1, col2, col3 = st.columns([1, 2, 1]) + with col2: + st.pyplot(fig2, use_container_width=True) + + # Action Buttons + st.markdown('
', unsafe_allow_html=True) + btn_cols = st.columns([1, 1, 1]) + + with btn_cols[0]: + if st.button("๐Ÿ”„ Run New Analysis", use_container_width=True): + for key in list(st.session_state.keys()): + del st.session_state[key] + st.rerun() + + with btn_cols[1]: + if st.button("๐Ÿ“Š Export Analytics", use_container_width=True): + csv_report = f"""SmartLane AI - Traffic Analysis Report +Generated: {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')} +=============================================== + +SUMMARY STATISTICS +------------------ +Total Vehicles Detected: {total_vehicles} +Emergency Vehicles: {len(st.session_state.emergency_directions)} +Total Cycle Time: {sum([t['Green Time (sec)'] for t in timing_data]) + 12} seconds + +EMERGENCY VEHICLE DETECTION +--------------------------- +""" + if st.session_state.emergency_directions: + for d in st.session_state.emergency_directions: + csv_report += f"{d}: Ambulance detected ({st.session_state.emergency_confidence[d]:.1f}% confidence)\n" + csv_report += f" Method: {st.session_state.detection_method[d]}\n" + else: + csv_report += "No emergency vehicles detected\n" + + csv_report += f"\nDIRECTION ANALYSIS\n------------------\n" + for direction in directions: + csv_report += f"{direction}: {st.session_state.counts[direction]} vehicles\n" + + total_cars = sum( + [st.session_state.class_counts[d].get('car', 0) for d in directions]) + total_motorcycles = sum( + [st.session_state.class_counts[d].get('motorcycle', 0) for d in directions]) + total_buses = sum( + [st.session_state.class_counts[d].get('bus', 0) for d in directions]) + total_trucks = sum( + [st.session_state.class_counts[d].get('truck', 0) for d in directions]) + + csv_report += f"\nVEHICLE CLASSIFICATION\n----------------------\n" + csv_report += f"Cars: {total_cars}\n" + csv_report += f"Motorcycles: {total_motorcycles}\n" + csv_report += f"Buses: {total_buses}\n" + csv_report += f"Trucks: {total_trucks}\n" + + csv_report += f"\nSIGNAL TIMINGS\n--------------\n" + 
for item in timing_data: + csv_report += f"{item['Direction']}: {item['Green Time (sec)']} seconds ({item['Priority']})\n" + + st.download_button( + label="โฌ‡๏ธ Download Report", + data=csv_report, + file_name=f"smartlane_report_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.txt", + mime="text/plain", + ) + + with btn_cols[2]: + if st.button("๐Ÿ“ง Share Results", use_container_width=True): + st.info("๐Ÿ“ค Sharing functionality available in production release") + + st.markdown('
', unsafe_allow_html=True) + + # Footer + st.markdown(""" +
+

+ โšก Powered by SmartLane AI +

+

+ PARANOX 2.0 National Hackathon Project โ€ข Developed by Team SourceCode +

+
+

+ Technology Stack:
+ YOLOv8 Object Detection โ€ข CNN Ambulance Classification โ€ข Streamlit Framework โ€ข + Python Deep Learning โ€ข Real-Time Analytics Engine โ€ข Emergency Vehicle Priority System โ€ข + Adaptive Signal Processing โ€ข Computer Vision โ€ข Multi-Method Detection +

+
+
+

+ ยฉ 2025 Team SourceCode โ€ข Built for TechXNinjas PARANOX 2.0 Hackathon +

+
+
+ """, unsafe_allow_html=True) + +else: + st.info("๐Ÿ“ค Please upload images for all four directions to begin analysis")