| import gradio as gr |
| import cv2 |
| import numpy as np |
| import os |
| import sys |
| from ultralytics import YOLO |
| from PIL import Image |
| import time |
|
|
# Log interpreter and framework versions at startup to ease debugging
# of environment-specific issues (Gradio APIs differ between versions).
print("Python version:", sys.version)
print("Gradio version:", gr.__version__)
|
|
|
|
class SimpleObjectDetector:
    """Thin wrapper around a YOLO11n model for general object detection.

    If the model weights cannot be loaded, the detector falls back to a
    "dummy mode" (``self.model is None``) so the surrounding UI keeps
    working and the failure is visible on the output image.
    """

    def __init__(self):
        """Initialize YOLO11n general object detector.

        On any load failure the traceback is printed and the instance
        stays in dummy mode instead of raising.
        """
        self.model = None
        try:
            # Ultralytics downloads yolo11n.pt on first use if not cached.
            self.model = YOLO('yolo11n.pt')
            print("✅ YOLO11n model initialization complete")
            print("📦 Can detect 80 object classes: person, car, animals, etc.")
        except Exception as e:
            import traceback
            print(f"⚠️ Model initialization error: {e}")
            traceback.print_exc()
            print("🔄 Running in dummy mode")

    def detect(self, image, conf_threshold=0.25):
        """Run object detection on an image.

        Args:
            image: numpy image in OpenCV BGR layout, or None.
            conf_threshold: minimum confidence for a detection to be kept.

        Returns:
            Tuple ``(annotated_image, detections)`` where ``detections`` is
            a list of ``{'class': str, 'confidence': float}`` dicts. With no
            model loaded, returns the input image stamped with an error
            banner and an empty list; on a runtime error, returns the input
            image unannotated and an empty list.
        """
        if image is None:
            return None, []

        if self.model is None:
            # Dummy mode: make the missing model visible on the image itself.
            result = image.copy()
            cv2.putText(result, "MODEL NOT FOUND", (50, 100),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            return result, []

        try:
            results = self.model(image, conf=conf_threshold)

            detections = []
            if len(results) > 0:
                # plot() draws boxes/labels and returns a BGR numpy image.
                annotated = results[0].plot()

                for box in results[0].boxes:
                    class_id = int(box.cls[0])
                    class_name = results[0].names[class_id]
                    confidence = float(box.conf[0])
                    detections.append({
                        'class': class_name,
                        'confidence': confidence
                    })

                return annotated, detections

            return image, []
        except Exception as e:
            # Best-effort contract: never crash the UI from a detection error.
            print(f"Detection Error: {e}")
            return image, []
|
|
| |
| detector = SimpleObjectDetector() |
|
|
def process_image(image, conf_threshold):
    """Detect objects in an uploaded image and format the results.

    Args:
        image: RGB or RGBA numpy array from the Gradio image component,
            or None when nothing was uploaded.
        conf_threshold: confidence threshold forwarded to the detector.

    Returns:
        Tuple ``(result_image, result_text)``: an RGB annotated image
        (or None) and a human-readable summary string.
    """
    if image is None:
        return None, "Please upload an image"

    # Gradio delivers RGB(A); OpenCV/YOLO drawing expects BGR.
    if len(image.shape) == 3:
        if image.shape[2] == 4:
            image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Time only the detection itself for the user-facing report.
    start_time = time.time()
    result, detections = detector.detect(image, conf_threshold)
    processing_time = time.time() - start_time

    # Convert back to RGB for display in the browser.
    if result is not None:
        result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)

    if len(detections) > 0:
        result_text = f"✅ Detected {len(detections)} object(s):\n\n"
        for i, det in enumerate(detections, 1):
            result_text += f"{i}. {det['class'].upper()} - Confidence: {det['confidence']*100:.1f}%\n"
        result_text += f"\n⏱️ Processing time: {processing_time:.2f} seconds"
    else:
        result_text = "❌ No objects detected\n\nTry:\n- Adjusting confidence threshold\n- Using a clearer image\n- Getting closer to objects"

    return result, result_text
|
|
def flip_image(image):
    """Mirror the image horizontally (flip around the vertical axis)."""
    if image is None:
        return None

    # Normalize a possible PIL input to a numpy array before OpenCV use.
    frame = np.array(image) if isinstance(image, Image.Image) else image

    # flipCode=1 selects the horizontal flip in OpenCV.
    return cv2.flip(frame, 1)
|
|
def rotate_image(image, angle):
    """Rotate the image by `angle` degrees about its center.

    The output keeps the original width x height, so corners may be
    clipped for non-right-angle rotations.
    """
    if image is None:
        return None

    # Normalize a possible PIL input to a numpy array before OpenCV use.
    frame = np.array(image) if isinstance(image, Image.Image) else image

    h, w = frame.shape[:2]
    # Affine rotation about the image center with no scaling (factor 1.0).
    matrix = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
    return cv2.warpAffine(frame, matrix, (w, h))
|
|
def adjust_brightness_contrast(image, bright_val, contrast_val):
    """Apply contrast scaling and a brightness offset to the image.

    Per-pixel formula in normalized [0, 1] space:
    ``out = in * contrast_val + (bright_val - 1.0)``, clipped to [0, 1]
    and rescaled to uint8. A bright_val of 1.0 adds no offset.
    """
    if image is None:
        return None

    # Normalize a possible PIL input to a numpy array first.
    pixels = np.array(image) if isinstance(image, Image.Image) else image

    # Work in float space so scaling/offset are applied without wrap-around.
    scaled = pixels.astype(np.float32) / 255.0 * contrast_val + (bright_val - 1.0)
    clipped = np.clip(scaled, 0, 1)
    return (clipped * 255).astype(np.uint8)
|
|
def generate_test_image():
    """Create a light-gray 640x480 canvas with simple shapes for testing."""
    canvas = np.full((480, 640, 3), 230, dtype=np.uint8)

    # Filled rectangle and circle (colors are BGR tuples).
    cv2.rectangle(canvas, (100, 150), (200, 300), (50, 50, 200), -1)
    cv2.circle(canvas, (400, 200), 50, (200, 50, 50), -1)

    # Caption and hint text in black.
    cv2.putText(canvas, "TEST IMAGE", (200, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    cv2.putText(canvas, "Click 'Detect Objects' to test", (150, 400),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)

    return canvas
|
|
| |
# Build the Gradio UI. Layout: left column = input + controls,
# right column = detection output; a collapsible test/examples section below.
with gr.Blocks(title="YOLO11n Object Detection - Smartphone Friendly", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📱🚗 YOLO11n Object Detection")
    gr.Markdown("### Detect 80 types of objects: people, vehicles, animals, and more!")

    with gr.Row():
        with gr.Column(scale=1):
            # Usage instructions and capability overview.
            gr.Markdown("""
            ## 📋 How to Use:
            1. **Upload** an image or **Take Photo** (mobile)
            2. Adjust **confidence threshold** if needed
            3. Click **🔍 Detect Objects**

            **Detectable Objects:**
            - 🚗 Vehicles (car, truck, bus, motorcycle, bicycle)
            - 🚶 People and body parts
            - 🐕 Animals (dog, cat, bird, horse, etc.)
            - ⚽ Sports equipment
            - 🪑 Furniture and household items
            - And 60+ more categories!
            """)

            # Image input. NOTE(review): only "upload" is enabled; mobile
            # browsers still offer the camera through the upload picker.
            with gr.Group():
                gr.Markdown("### 📸 Capture or Upload Image")
                image_input = gr.Image(
                    label="Input Image",
                    type="numpy",
                    sources=["upload"],
                    interactive=True
                )

            # Detection confidence threshold passed straight to YOLO.
            conf_slider = gr.Slider(
                0.1, 0.9,
                value=0.25,
                step=0.05,
                label="🎯 Confidence Threshold",
                info="Lower = more detections (may include false positives)"
            )

            # Simple pre-processing tools applied in place to the input image.
            with gr.Accordion("🔧 Image Adjustments", open=False):
                with gr.Row():
                    flip_btn = gr.Button("🪞 Flip", size="sm")
                    rotate_90_btn = gr.Button("↪️ Rotate 90°", size="sm")
                    rotate_180_btn = gr.Button("🔄 Rotate 180°", size="sm")

                brightness = gr.Slider(0.5, 2.0, value=1.0, label="☀️ Brightness")
                contrast = gr.Slider(0.5, 2.0, value=1.0, label="🎨 Contrast")

        with gr.Column(scale=1):
            # Output side: annotated image plus a textual report.
            gr.Markdown("## 📊 Detection Results")
            output_image = gr.Image(label="Detected Objects", interactive=False)

            detect_btn = gr.Button(
                "🔍 Detect Objects",
                variant="primary",
                size="lg"
            )

            results_text = gr.Textbox(
                label="📝 Detection Details",
                lines=10,
                interactive=False
            )

    # Collapsible helper section with a synthetic test image and tips.
    with gr.Accordion("🧪 Test & Examples", open=False):
        with gr.Row():
            test_btn = gr.Button("Generate Test Image")

        gr.Markdown("""
        **💡 Tips for Best Results:**
        - Use clear, well-lit photos
        - Ensure objects are not too far away
        - Avoid heavy shadows or blur
        - Try different confidence thresholds
        """)

    # ---- Event wiring -------------------------------------------------------
    test_btn.click(
        fn=generate_test_image,
        outputs=image_input
    )

    flip_btn.click(
        fn=flip_image,
        inputs=image_input,
        outputs=image_input
    )

    rotate_90_btn.click(
        fn=lambda img: rotate_image(img, 90),
        inputs=image_input,
        outputs=image_input
    )

    rotate_180_btn.click(
        fn=lambda img: rotate_image(img, 180),
        inputs=image_input,
        outputs=image_input
    )

    # NOTE(review): each slider change re-applies the adjustment to the
    # already-adjusted image written back into image_input, so the effect
    # compounds across moves. Confirm this cumulative behavior is intended.
    brightness.change(
        fn=lambda img, b, c: adjust_brightness_contrast(img, b, c) if img is not None else None,
        inputs=[image_input, brightness, contrast],
        outputs=image_input
    )

    contrast.change(
        fn=lambda img, b, c: adjust_brightness_contrast(img, b, c) if img is not None else None,
        inputs=[image_input, brightness, contrast],
        outputs=image_input
    )

    detect_btn.click(
        fn=process_image,
        inputs=[image_input, conf_slider],
        outputs=[output_image, results_text]
    )
|
|
if __name__ == "__main__":
    # Startup banner describing the app's capabilities and access URLs.
    print("=" * 60)
    print("🚀 YOLO11n Object Detection - Smartphone Friendly")
    print("=" * 60)
    print("📦 Detects 80 object classes including:")
    print("   - People, vehicles, animals")
    print("   - Furniture, sports equipment")
    print("   - Electronics, food items, and more!")
    print("=" * 60)
    print("🌐 Access via: http://localhost:7860")
    print("📱 Mobile: Use same network with computer's IP:7860")
    print("=" * 60)

    try:
        # server_name="0.0.0.0" exposes the app on the LAN;
        # share=True additionally opens a public Gradio tunnel.
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            debug=False,
            share=True,
            show_error=True,
            max_file_size="20MB"
        )
    except Exception as e:
        print(f"❌ Launch Error: {e}")
        print("\n🔧 Troubleshooting:")
        print("1. Try different port: demo.launch(server_port=7861)")
        print("2. Check firewall settings")
        print("3. Ensure ultralytics is installed: pip install ultralytics")