# AI_Traffic_Control_Center / src/streamlit_app.py
# (commit 1b3baae)
import os
import tempfile
import time

import cv2
import streamlit as st
from crewai import Agent, Task, Crew
from langchain_groq import ChatGroq
from ultralytics import YOLO
# --- CONFIG ---
st.set_page_config(page_title="CityFlow AI", page_icon="🚦", layout="wide")

# --- SIDEBAR ---
# Runtime inputs: the Groq key gates agent creation, the uploaded clip
# gates the processing loop, and the slider tunes YOLO's score threshold.
with st.sidebar:
    st.header("🚦 CityFlow Control")
    groq_api_key = st.text_input("Groq API Key", type="password")
    uploaded_file = st.file_uploader("Upload CCTV Footage", type=["mp4", "mov"])
    confidence = st.slider("Detection Confidence", 0.0, 1.0, 0.3)
# --- MAIN APP ---
st.title("🚦 CityFlow: Autonomous Traffic Management")
st.markdown("Real-time computer vision + Multi-Agent reasoning to optimize traffic flow.")


@st.cache_resource
def load_model():
    """Load the YOLOv8-nano detector once and cache it across reruns."""
    detector = YOLO('yolov8n.pt')
    return detector


model = load_model()
# --- THE AGENTS ---
# Built only once a Groq key is present; without it the reasoning layer
# (and the processing loop below, which also checks the key) stays off.
if groq_api_key:
    llm = ChatGroq(
        temperature=0,
        model_name="llama-3.3-70b-versatile",
        groq_api_key=groq_api_key,
    )

    # Agent 1: turns raw detection counts into a congestion assessment.
    analyst = Agent(
        role='Traffic Data Analyst',
        goal='Analyze vehicle counts and congestion levels to determine traffic severity.',
        backstory="You are an expert in urban flow. You look at raw numbers (car counts) and decide if it's 'Light', 'Heavy', or 'Gridlock'.",
        llm=llm,
        verbose=False,
    )

    # Agent 2: converts the assessment into a signal-timing decision.
    controller = Agent(
        role='Signal Control Officer',
        goal='Decide the optimal traffic light duration based on severity.',
        backstory="You control the city lights. If an ambulance is seen, you MUST open the lane. If traffic is heavy, extend Green light duration.",
        llm=llm,
        verbose=False,
    )
# --- VIDEO PROCESSING LOOP ---
if uploaded_file and groq_api_key:
    # Persist the upload to disk so OpenCV can open it by path.
    # close() flushes the buffered bytes BEFORE VideoCapture reads the
    # file — leaving the handle open risks cv2 seeing a truncated file.
    tfile = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    tfile.write(uploaded_file.read())
    tfile.close()
    cap = cv2.VideoCapture(tfile.name)

    col1, col2 = st.columns([2, 1])
    with col1:
        st_frame = st.empty()
    with col2:
        # Two separate placeholders: an st.empty() holds exactly ONE
        # element, so writing both metrics into one placeholder would
        # make the second overwrite the first.
        st_vehicles = st.empty()
        st_emergency = st.empty()
        st_decision = st.empty()

    frame_count = 0
    try:
        # Process Video
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame_count += 1
            # Only run the expensive AI stack every 10th frame to keep it fast.
            if frame_count % 10 != 0:
                continue

            # 1. PERCEPTION (YOLO) — track so object IDs persist across frames.
            results = model.track(frame, persist=True, conf=confidence)

            # COCO classes: 2=Car, 3=Motorcycle, 5=Bus, 7=Truck.
            # Note: standard COCO doesn't distinguish "ambulance", so for
            # this demo a Truck (class 7) stands in for emergency/heavy.
            car_count = 0
            emergency_count = 0
            for result in results:
                for box in result.boxes:
                    cls = int(box.cls[0])
                    if cls in (2, 3, 5, 7):
                        car_count += 1
                        if cls == 7:  # Simulating "Truck/Emergency" demo logic
                            emergency_count += 1

            # Annotated frame with boxes drawn on it.
            res_plotted = results[0].plot()

            # 2. REASONING (CrewAI) — hand the real-time counts to the agents.
            task_analyze = Task(
                description=f"Current Status: {car_count} vehicles detected. {emergency_count} heavy/emergency vehicles. Analyze congestion level.",
                agent=analyst,
                expected_output="A short status: 'Light', 'Moderate', 'Critical'."
            )
            task_control = Task(
                description="Based on the analysis, decide the Green Light duration (in seconds). If Status is Critical or Emergency detected, set to MAX (60s).",
                agent=controller,
                expected_output="JSON: {'status': '...', 'green_light_duration': 45, 'reason': '...'}"
            )
            # A mini-crew per sampled frame. NOTE: kickoff() blocks the
            # video loop; in production run this async, not inline.
            crew = Crew(agents=[analyst, controller], tasks=[task_analyze, task_control])
            decision = crew.kickoff()

            # 3. VISUALIZATION
            st_frame.image(res_plotted, channels="BGR")
            st_vehicles.metric(label="Vehicles Detected", value=car_count)
            st_emergency.metric(label="Emergency/Heavy", value=emergency_count)
            st_decision.info(f"🤖 AI Decision: \n\n{decision}")
    finally:
        # Always release the capture and remove the temp copy of the
        # upload, even if an exception escapes the loop. The unlink is
        # best-effort (e.g. Windows may still hold a mapping).
        cap.release()
        try:
            os.unlink(tfile.name)
        except OSError:
            pass