# app0.py — YOLO11n object-detection Gradio demo
# (Hugging Face Spaces file; renamed from app.py, commit 86b7d4a)
import gradio as gr
import cv2
import numpy as np
import os
import sys
from ultralytics import YOLO
from PIL import Image
import time

# Log interpreter/framework versions at import time to ease debugging of
# deployment environments (e.g. Hugging Face Spaces version mismatches).
print("Python version:", sys.version)
print("Gradio version:", gr.__version__)
class SimpleObjectDetector:
    """Thin wrapper around the YOLO11n general-purpose detector.

    If the weights cannot be loaded, the instance stays usable in a
    "dummy" mode that merely stamps a warning onto the input frame.
    """

    def __init__(self):
        """Initialize YOLO11n general object detector."""
        self.model = None
        try:
            self.model = YOLO('yolo11n.pt')
            print("βœ… YOLO11n model initialization complete")
            print("πŸ“¦ Can detect 80 object classes: person, car, animals, etc.")
        except Exception as e:
            import traceback
            print(f"⚠️ Model initialization error: {e}")
            traceback.print_exc()
            print("πŸ”„ Running in dummy mode")

    def detect(self, image, conf_threshold=0.25):
        """Run inference on a BGR image.

        Returns (annotated_image, detections) where detections is a list of
        {'class': name, 'confidence': score} dicts; ([], None-safe) fallbacks
        on missing input, missing model, or inference failure.
        """
        if image is None:
            return None, []

        # Dummy mode: the model never loaded, so just label the frame.
        if self.model is None:
            stamped = image.copy()
            cv2.putText(stamped, "MODEL NOT FOUND", (50, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            return stamped, []

        try:
            results = self.model(image, conf=conf_threshold)
            if not results:
                return image, []
            first = results[0]
            annotated = first.plot()  # frame with boxes/labels drawn in
            detections = [
                {
                    'class': first.names[int(box.cls[0])],
                    'confidence': float(box.conf[0]),
                }
                for box in first.boxes
            ]
            return annotated, detections
        except Exception as e:
            print(f"Detection Error: {e}")
            return image, []
# Create instance — module-level singleton so the weights are loaded once at
# import time and reused by every Gradio request.
detector = SimpleObjectDetector()
def process_image(image, conf_threshold):
    """Detect objects in an uploaded image and format a result summary.

    Returns (annotated RGB image or None, human-readable result text).
    """
    if image is None:
        return None, "Please upload an image"

    # Gradio delivers RGB(A); the OpenCV/YOLO pipeline works in BGR.
    if len(image.shape) == 3:
        if image.shape[2] == 4:  # drop the alpha channel first
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    started = time.time()
    result, detections = detector.detect(image, conf_threshold)
    elapsed = time.time() - started

    # Convert the annotated frame back to RGB for display.
    if result is not None:
        result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)

    if not detections:
        return result, "❌ No objects detected\n\nTry:\n- Adjusting confidence threshold\n- Using a clearer image\n- Getting closer to objects"

    header = f"βœ… Detected {len(detections)} object(s):\n\n"
    rows = "".join(
        f"{idx}. {item['class'].upper()} - Confidence: {item['confidence']*100:.1f}%\n"
        for idx, item in enumerate(detections, 1)
    )
    footer = f"\n⏱️ Processing time: {elapsed:.2f} seconds"
    return result, header + rows + footer
def flip_image(image):
    """Flip an image horizontally (mirror around the vertical axis).

    Args:
        image: numpy array (H, W[, C]), PIL Image, or any array-like;
            None passes through unchanged.

    Returns:
        Horizontally flipped uint8/original-dtype numpy array, or None.
    """
    if image is None:
        return None
    # Accept PIL Images and other array-likes without hard-referencing PIL
    # (the original isinstance(image, Image.Image) check required PIL even
    # for plain numpy inputs).
    if not isinstance(image, np.ndarray):
        image = np.asarray(image)
    # Equivalent to cv2.flip(image, 1); made contiguous so downstream
    # OpenCV calls accept the buffer.
    return np.ascontiguousarray(image[:, ::-1])
def rotate_image(image, angle):
    """Rotate an image by `angle` degrees around its center.

    The output canvas keeps the original width/height, so corners of a
    non-square rotation are clipped.
    """
    if image is None:
        return None
    if isinstance(image, Image.Image):
        image = np.array(image)
    h, w = image.shape[:2]
    pivot = (w // 2, h // 2)
    matrix = cv2.getRotationMatrix2D(pivot, angle, 1.0)
    return cv2.warpAffine(image, matrix, (w, h))
def adjust_brightness_contrast(image, bright_val, contrast_val):
    """Adjust image brightness and contrast.

    Works in normalized [0, 1] space: pixel' = pixel * contrast_val +
    (bright_val - 1.0), then clipped and converted back to uint8.
    A value of 1.0 for both arguments is the identity transform.

    Args:
        image: numpy uint8 image, PIL Image, or any array-like; None
            passes through unchanged.
        bright_val: additive brightness term (1.0 = unchanged).
        contrast_val: multiplicative contrast factor (1.0 = unchanged).

    Returns:
        Adjusted uint8 numpy array, or None if image is None.
    """
    if image is None:
        return None
    # Accept PIL Images and other array-likes without hard-referencing PIL
    # (the original isinstance(image, Image.Image) check required PIL even
    # for plain numpy inputs).
    if not isinstance(image, np.ndarray):
        image = np.asarray(image)
    img_float = image.astype(np.float32) / 255.0
    adjusted = img_float * contrast_val + (bright_val - 1.0)
    adjusted = np.clip(adjusted, 0, 1)
    return (adjusted * 255).astype(np.uint8)
def generate_test_image():
    """Generate a light-gray test image with sample shapes to detect."""
    canvas = np.full((480, 640, 3), 230, dtype=np.uint8)
    cv2.putText(canvas, "TEST IMAGE", (200, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    # Sample shapes for the detector to (attempt to) find.
    cv2.rectangle(canvas, (100, 150), (200, 300), (50, 50, 200), -1)  # "car-like" shape
    cv2.circle(canvas, (400, 200), 50, (200, 50, 50), -1)  # circle
    cv2.putText(canvas, "Click 'Detect Objects' to test", (150, 400),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
    return canvas
# Create Gradio Interface.
# Two-column layout: inputs/adjustments on the left, detection output on the
# right. Adjustment controls write their result back into the input image
# component, so detection always runs on the currently displayed image.
with gr.Blocks(title="YOLO11n Object Detection - Smartphone Friendly", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# πŸ“±πŸ” YOLO11n Object Detection")
    gr.Markdown("### Detect 80 types of objects: people, vehicles, animals, and more!")
    with gr.Row():
        with gr.Column(scale=1):
            # Instructions
            gr.Markdown("""
## πŸ“‹ How to Use:
1. **Upload** an image or **Take Photo** (mobile)
2. Adjust **confidence threshold** if needed
3. Click **πŸš€ Detect Objects**
**Detectable Objects:**
- πŸš— Vehicles (car, truck, bus, motorcycle, bicycle)
- 🧍 People and body parts
- πŸ• Animals (dog, cat, bird, horse, etc.)
- ⚽ Sports equipment
- πŸͺ‘ Furniture and household items
- And 60+ more categories!
""")
            # Image input — type="numpy" so handlers receive numpy arrays
            with gr.Group():
                gr.Markdown("### πŸ“Έ Capture or Upload Image")
                image_input = gr.Image(
                    label="Input Image",
                    type="numpy",
                    sources=["upload"],
                    interactive=True
                )
            # Confidence threshold
            conf_slider = gr.Slider(
                0.1, 0.9,
                value=0.25,
                step=0.05,
                label="🎯 Confidence Threshold",
                info="Lower = more detections (may include false positives)"
            )
            # Image manipulation controls
            with gr.Accordion("πŸ”„ Image Adjustments", open=False):
                with gr.Row():
                    flip_btn = gr.Button("πŸͺž Flip", size="sm")
                    rotate_90_btn = gr.Button("β†ͺ️ Rotate 90Β°", size="sm")
                    rotate_180_btn = gr.Button("πŸ”„ Rotate 180Β°", size="sm")
                brightness = gr.Slider(0.5, 2.0, value=1.0, label="β˜€οΈ Brightness")
                contrast = gr.Slider(0.5, 2.0, value=1.0, label="🎨 Contrast")
        with gr.Column(scale=1):
            # Detection results
            gr.Markdown("## πŸ” Detection Results")
            output_image = gr.Image(label="Detected Objects", interactive=False)
            # Detection button
            detect_btn = gr.Button(
                "πŸš€ Detect Objects",
                variant="primary",
                size="lg"
            )
            # Results text
            results_text = gr.Textbox(
                label="πŸ“Š Detection Details",
                lines=10,
                interactive=False
            )
    # Test section
    with gr.Accordion("πŸ§ͺ Test & Examples", open=False):
        with gr.Row():
            test_btn = gr.Button("Generate Test Image")
        gr.Markdown("""
**πŸ’‘ Tips for Best Results:**
- Use clear, well-lit photos
- Ensure objects are not too far away
- Avoid heavy shadows or blur
- Try different confidence thresholds
""")
    # Event handlers
    test_btn.click(
        fn=generate_test_image,
        outputs=image_input
    )
    # Image adjustment buttons — each overwrites the input image in place
    flip_btn.click(
        fn=flip_image,
        inputs=image_input,
        outputs=image_input
    )
    rotate_90_btn.click(
        fn=lambda img: rotate_image(img, 90),
        inputs=image_input,
        outputs=image_input
    )
    rotate_180_btn.click(
        fn=lambda img: rotate_image(img, 180),
        inputs=image_input,
        outputs=image_input
    )
    # Brightness/Contrast adjustments.
    # NOTE(review): each slider change re-applies the adjustment to the
    # already-adjusted image, so effects compound rather than being absolute
    # relative to the original upload — confirm this is intended.
    brightness.change(
        fn=lambda img, b, c: adjust_brightness_contrast(img, b, c) if img is not None else None,
        inputs=[image_input, brightness, contrast],
        outputs=image_input
    )
    contrast.change(
        fn=lambda img, b, c: adjust_brightness_contrast(img, b, c) if img is not None else None,
        inputs=[image_input, brightness, contrast],
        outputs=image_input
    )
    # Main detection
    detect_btn.click(
        fn=process_image,
        inputs=[image_input, conf_slider],
        outputs=[output_image, results_text]
    )
if __name__ == "__main__":
    # Startup banner with usage hints.
    print("=" * 60)
    print("πŸš€ YOLO11n Object Detection - Smartphone Friendly")
    print("=" * 60)
    print("πŸ“¦ Detects 80 object classes including:")
    print(" - People, vehicles, animals")
    print(" - Furniture, sports equipment")
    print(" - Electronics, food items, and more!")
    print("=" * 60)
    print("🌐 Access via: http://localhost:7860")
    print("πŸ“± Mobile: Use same network with computer's IP:7860")
    print("=" * 60)
    try:
        demo.launch(
            # Bind all interfaces so phones on the same LAN can connect.
            server_name="0.0.0.0",
            server_port=7860,
            debug=False,
            # Also create a public Gradio share link.
            share=True,
            show_error=True,
            # Guard against very large uploads.
            max_file_size="20MB"
        )
    except Exception as e:
        # Launch failed (port busy, firewall, missing deps) — print hints
        # rather than crashing with a bare traceback.
        print(f"❌ Launch Error: {e}")
        print("\nπŸ”§ Troubleshooting:")
        print("1. Try different port: demo.launch(server_port=7861)")
        print("2. Check firewall settings")
        print("3. Ensure ultralytics is installed: pip install ultralytics")