# app.py
"""AI Toolkit for Civil Engineers — Streamlit app.

Sidebar modules (in display order): Project Management, On-site Safety,
Sustainable Design, Video Safety Violation Detector, About.
"""

import json  # noqa: F401  (kept: may be used by other chunks of this file)
import os
import tempfile

import cv2
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np  # noqa: F401  (kept: may be used by other chunks of this file)
import pandas as pd
import plotly.express as px
import streamlit as st
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

# Set page config with light beige background
st.set_page_config(
    page_title="AI Toolkit for Civil Engineers",
    layout="wide",
    page_icon="🏗️",
)

# Custom CSS hook for light beige background and red navigation.
# NOTE(review): the stylesheet body was empty in the original source — the
# intended CSS was apparently lost; restore it here when recovered.
st.markdown("", unsafe_allow_html=True)

st.title("🏗️ AI-Powered Civil Engineering Assistant")

# Sidebar navigation (Project Management first, as requested).
st.sidebar.title("Navigation")
section = st.sidebar.radio(
    "Go to",
    [
        "Project Management",
        "On-site Safety",
        "Sustainable Design",
        "Video Safety Violation Detector",
        "About",
    ],
    index=0,
)


@st.cache_resource
def load_model():
    """Load and cache the DETR processor/model pair once per session."""
    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
    return processor, model


processor, model = load_model()


def _normalize_task_id(raw):
    """Return a canonical string key for a task / predecessor identifier.

    CSV columns may arrive as int, float (pandas upcasts to float when the
    column holds NaN) or string; "1", "1.0" and 1 must all map to the same
    graph-node key, otherwise dependency edges are silently dropped.
    """
    text = str(raw).strip()
    try:
        return str(int(float(text)))
    except ValueError:
        return text


def sustainable_design():
    """Estimate total embodied carbon from user-entered material tonnages."""
    st.header("🌱 Sustainable Design and Carbon Footprint Analysis")

    # Small built-in emission-factor table (kg CO₂ per ton of material).
    material_db = pd.DataFrame({
        "Material": ["Concrete", "Steel", "Wood", "Brick"],
        "Carbon_kg_per_ton": [300, 1700, 60, 200],
    })

    st.write("### 📦 Select Materials and Quantities")
    selected_materials = {}
    for _, row in material_db.iterrows():
        qty = st.number_input(f"{row['Material']} (tons)", min_value=0.0, step=0.1)
        selected_materials[row["Material"]] = qty

    if st.button("Calculate Carbon Footprint"):
        total_kg = sum(
            selected_materials[row["Material"]] * row["Carbon_kg_per_ton"]
            for _, row in material_db.iterrows()
        )
        st.success(f"Estimated Total Carbon Emission: {total_kg:,.2f} kg CO₂")
        st.write("### 🧠 AI Recommendation")
        st.info(
            "Consider using more sustainable materials like engineered wood "
            "or recycled concrete where possible."
        )


def project_management():
    """Render a schedule table, Gantt chart and critical-path diagram.

    Expects a CSV with columns: Task ID, Task Name, Start Date,
    Duration (days), End Date, Predecessor (comma-separated Task IDs).
    """
    st.header("📊 Smart Project Management")
    file = st.file_uploader("Upload Project Schedule (CSV)", type="csv")
    if not file:
        return

    df = pd.read_csv(file)
    required_cols = {
        "Task ID", "Task Name", "Start Date",
        "Duration (days)", "End Date", "Predecessor",
    }
    if not required_cols.issubset(set(df.columns)):
        st.error(f"Missing required columns. Found: {list(df.columns)}")
        return

    # Convert date columns
    df["Start Date"] = pd.to_datetime(df["Start Date"])
    df["End Date"] = pd.to_datetime(df["End Date"])

    st.subheader("📄 Project Schedule")
    st.dataframe(df)

    st.markdown("### 🗓️ Project Timeline")
    st.write(f"- *Start Date:* {df['Start Date'].min().date()}")
    st.write(f"- *End Date:* {df['End Date'].max().date()}")
    st.write(f"- *Total Tasks:* {len(df)}")

    # Gantt chart
    st.subheader("📊 Gantt Chart")
    gantt_df = df.rename(columns={"Task Name": "Task"})
    fig = px.timeline(
        gantt_df,
        x_start="Start Date",
        x_end="End Date",
        y="Task",
        title="Project Gantt Chart",
        color="Task",
    )
    fig.update_yaxes(categoryorder="total ascending")
    st.plotly_chart(fig, use_container_width=True)

    # Critical-path calculation
    st.subheader("🔺 Critical Path Diagram")
    G = nx.DiGraph()

    # First pass: nodes (so edge weights below can read target durations).
    for _, row in df.iterrows():
        task_id = _normalize_task_id(row["Task ID"])
        G.add_node(task_id, label=row["Task Name"], duration=row["Duration (days)"])

    # Second pass: dependency edges. nx.dag_longest_path sums *edge*
    # weights, so the successor task's duration is stored on each edge —
    # the original passed weight="duration" but durations only existed on
    # nodes, silently degrading the "critical path" to a hop-count path.
    for _, row in df.iterrows():
        task_id = _normalize_task_id(row["Task ID"])
        if pd.notna(row["Predecessor"]) and str(row["Predecessor"]).strip():
            preds = [
                _normalize_task_id(p)
                for p in str(row["Predecessor"]).split(",")
                if p.strip() != ""
            ]
            for pred in preds:
                G.add_edge(pred, task_id, duration=G.nodes[task_id]["duration"])

    try:
        critical_path = nx.dag_longest_path(G, weight="duration")
        st.success(
            f"Critical Path: {' → '.join([G.nodes[n]['label'] for n in critical_path])}"
        )
        pos = nx.spring_layout(G)
        labels = {n: f"{G.nodes[n]['label']}" for n in G.nodes()}
        # Consecutive node pairs along the critical path, as a set for O(1)
        # membership (the original scanned with list.index per edge).
        path_edges = set(zip(critical_path, critical_path[1:]))
        edge_colors = [
            "red" if (u, v) in path_edges else "black" for u, v in G.edges()
        ]
        fig_cp, ax = plt.subplots(figsize=(10, 6))
        nx.draw(
            G, pos, with_labels=True, labels=labels,
            node_color="skyblue", node_size=2000,
            edge_color=edge_colors, width=2, font_size=10, ax=ax,
        )
        st.pyplot(fig_cp)
    except Exception as e:
        st.error(f"Error computing critical path: {e}")


def on_site_safety():
    """Run DETR object detection on an uploaded site photo and list findings."""
    st.header("🛡️ AI-Powered On-site Safety Monitor")
    uploaded_img = st.file_uploader(
        "Upload an Image of Site", type=["jpg", "jpeg", "png"]
    )
    if not uploaded_img:
        return

    image = Image.open(uploaded_img)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("🔍 Detecting Safety Gear (Demo)")
    with st.spinner("Running Object Detection..."):
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            outputs = model(**inputs)
        # PIL .size is (width, height); DETR post-processing wants (h, w).
        target_sizes = torch.tensor([image.size[::-1]])
        results = processor.post_process_object_detection(
            outputs, target_sizes=target_sizes, threshold=0.9
        )[0]
        labels = [model.config.id2label[label.item()] for label in results["labels"]]
    st.write(f"Detected items: {', '.join(labels)}")
    st.warning("PPE Detection Demo: Currently does not distinguish between PPE types.")


def video_safety_detector():
    """Sample one frame per second of an uploaded video and flag missing PPE.

    NOTE(review): the COCO-trained "facebook/detr-resnet-50" checkpoint has
    no helmet/goggles/gloves classes, so every frame will currently be
    reported as a violation — swap in a PPE-finetuned checkpoint to make
    this meaningful.
    """
    st.header("🎥 Video Safety Violation Detector")
    uploaded_video = st.file_uploader("Upload a video", type=["mp4", "avi", "mov"])
    REQUIRED_GEAR = ["helmet", "goggles", "gloves"]
    if not uploaded_video:
        return

    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(uploaded_video.read())
    tfile.close()  # close before OpenCV reopens it (required on Windows)
    st.video(tfile.name)

    video = cv2.VideoCapture(tfile.name)
    try:
        fps = int(video.get(cv2.CAP_PROP_FPS))
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        if fps <= 0:
            # Corrupt/unsupported files report 0 fps; the original divided
            # by zero here.
            st.error("Could not read a valid frame rate from this video.")
            return
        duration = frame_count // fps
        st.write(f"🔍 Extracting 1 frame per second (Total seconds: {duration})...")

        violation_found = False
        for sec in range(0, duration):
            video.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
            success, frame = video.read()
            if not success:
                continue

            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(frame_rgb)
            inputs = processor(images=[image], return_tensors="pt", padding=True)
            with torch.no_grad():  # inference only
                outputs = model(**inputs)
            # (height, width) expected by the post-processor.
            target_sizes = torch.tensor([[image.size[1], image.size[0]]])
            results = processor.post_process_object_detection(
                outputs, target_sizes=target_sizes, threshold=0.9
            )[0]
            detected_labels = [
                model.config.id2label[label.item()].lower()
                for label in results["labels"]
            ]
            missing_items = [
                item for item in REQUIRED_GEAR if item not in detected_labels
            ]

            st.markdown(f"### 🕒 Second {sec}")
            st.image(image, width=400)
            if missing_items:
                violation_found = True
                # NOTE(review): the original HTML body was lost to source
                # mangling; reconstructed with the same visible text.
                st.markdown(
                    "<div style='background-color:#ffcccc;padding:10px;"
                    "border-radius:5px;'>"
                    "🚨 <b>Violation Detected!</b><br>"
                    f"Missing: {', '.join(missing_items).title()}"
                    "</div>",
                    unsafe_allow_html=True,
                )
            else:
                st.markdown(
                    "<div style='background-color:#ccffcc;padding:10px;"
                    "border-radius:5px;'>"
                    "✅ <b>All Safety Gear Present</b>"
                    "</div>",
                    unsafe_allow_html=True,
                )

        if not violation_found:
            st.success("🎉 No safety gear violations found in the video!")
    finally:
        # Original leaked the capture handle and only unlinked the temp
        # file on the success path.
        video.release()
        os.unlink(tfile.name)


def about():
    """Static information page."""
    st.header("ℹ️ About This App")
    st.write(
        "Developed for an AI Hackathon to empower civil engineers using "
        "modular AI-based tools."
    )
    st.markdown("- Built with 🐍 Python and Streamlit")
    st.markdown("- Deployable on Google Colab and Hugging Face Spaces")
    st.markdown(
        "- Modules: Sustainable Design, Project Management, Safety Monitor, "
        "Video Violation Detection"
    )


# Navigation — dispatch to the module selected in the sidebar.
if section == "Project Management":
    project_management()
elif section == "On-site Safety":
    on_site_safety()
elif section == "Sustainable Design":
    sustainable_design()
elif section == "Video Safety Violation Detector":
    video_safety_detector()
elif section == "About":
    about()