# objectdetection / src/streamlit_app.py
# (Hugging Face Space metadata — commit eaf6fcc by sandbox338, 4.82 kB —
#  preserved here as a comment so the file remains valid Python.)
import streamlit as st
import numpy as np
import cv2
import os
import sys
from PIL import Image
import traceback
# ---------------------------------------------------------------------------
# Page configuration and optional debug output
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="Object Detection App",
    page_icon="🔍",  # was mojibake ("πŸ”") — restored to the intended emoji
    layout="wide",
)

# Show environment details when the page is opened with a ?debug query
# parameter, e.g. https://host/app?debug=1.
# st.experimental_get_query_params() is deprecated in recent Streamlit
# releases; use st.query_params when the running version provides it.
_query_params = (
    st.query_params
    if hasattr(st, "query_params")
    else st.experimental_get_query_params()
)
if "debug" in _query_params:
    st.write("Python version:", sys.version)
    # SECURITY: do not dump os.environ values — on a publicly reachable page
    # that would expose API keys/tokens. Show only the variable names.
    st.write("Environment variable names:", sorted(os.environ.keys()))
    st.write("Current working directory:", os.getcwd())
    st.write("Directory contents:", os.listdir())
# ---------------------------------------------------------------------------
# Sidebar and lazy dependency loading
# ---------------------------------------------------------------------------
st.sidebar.title("Object Detection App")
st.sidebar.markdown("""
This app uses Detectron2 to detect objects in images.
""")

# Import the heavy ML dependencies inside a spinner so the page renders
# immediately and an import failure surfaces as a readable error (with a
# full traceback) instead of a blank page.
with st.spinner("Loading dependencies..."):
    try:
        import torch
        from detectron2.engine import DefaultPredictor
        from detectron2.config import get_cfg
        from detectron2 import model_zoo
        from detectron2.utils.visualizer import Visualizer
        from detectron2.data import MetadataCatalog

        # was mojibake ("βœ…") — restored to the intended check-mark emoji
        st.sidebar.success("✅ Dependencies loaded successfully!")
    except Exception as e:
        # Missing dependencies are fatal: report and halt the script run.
        st.error(f"Failed to load dependencies: {e}")
        st.error(traceback.format_exc())
        st.stop()
# Build the detector once per process; st.cache_resource reuses it across
# Streamlit reruns.
@st.cache_resource
def load_model():
    """Build and cache a Faster R-CNN predictor from the model zoo.

    Returns:
        ``(predictor, cfg)`` on success, or ``(None, None)`` when anything
        fails — the error and traceback are reported in the Streamlit UI.
    """
    config_name = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
    try:
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(config_name))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # drop low-confidence hits
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_name)
        # CPU inference is more reliable in the container environment.
        cfg.MODEL.DEVICE = "cpu"
        return DefaultPredictor(cfg), cfg
    except Exception as e:
        st.error(f"Error loading model: {e}")
        st.error(traceback.format_exc())
        return None, None
def main():
    """Render the page: upload an image, run detection, display results."""
    # was mojibake ("πŸ”") — restored to the intended emoji
    st.title("🔍 Object Detection")
    st.markdown("""
Upload an image to detect objects using Detectron2's Faster R-CNN model.
""")

    # Load the cached model; bail out with a visible error on failure.
    with st.spinner("Loading model..."):
        predictor, cfg = load_model()
    if predictor is None:
        st.error("Failed to load the model. Check the error messages.")
        return

    uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
    if uploaded_file is None:
        return

    try:
        # Read and display the uploaded image.
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # PIL yields RGB, but DefaultPredictor expects BGR by default
        # (cfg.INPUT.FORMAT == "BGR"); feeding RGB silently degrades
        # detection quality, so convert before inference.
        rgb_array = np.array(image.convert("RGB"))
        bgr_array = cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)

        with st.spinner("Detecting objects..."):
            outputs = predictor(bgr_array)

        instances = outputs["instances"].to("cpu")

        # Resolve dataset metadata once and reuse it for both the
        # visualizer and the per-detection class names (the original
        # performed this lookup twice).
        dataset_name = (
            cfg.DATASETS.TRAIN[0] if len(cfg.DATASETS.TRAIN) else "coco_2017_val"
        )
        metadata = MetadataCatalog.get(dataset_name)

        # Visualizer expects the RGB image.
        v = Visualizer(rgb_array, metadata=metadata, scale=1.2)
        result = v.draw_instance_predictions(instances)
        st.image(result.get_image(), caption="Detection Result", use_column_width=True)

        # Summarize detections (class name + confidence per instance).
        if len(instances) > 0:
            st.subheader(f"Detected {len(instances)} objects")
            class_names = metadata.thing_classes
            for i in range(len(instances)):
                score = instances.scores[i].item()
                class_id = instances.pred_classes[i].item()
                st.write(f"**{class_names[class_id]}**: {score:.2f} confidence")
        else:
            st.info("No objects detected in this image.")
    except Exception as e:
        st.error(f"Error processing image: {e}")
        st.error(traceback.format_exc())


if __name__ == "__main__":
    main()