karthikmn committed on
Commit
d9864a1
·
verified ·
1 Parent(s): 90732ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -26
app.py CHANGED
@@ -1,41 +1,52 @@
1
- import gradio as gr
2
  import cv2
3
- import numpy as np
 
4
  from PIL import Image
 
 
 
 
5
 
6
- def process_image(image):
7
  """
8
- Process an uploaded or captured image: convert it to grayscale.
9
  """
10
- if isinstance(image, np.ndarray):
11
- # Convert to grayscale if it's a NumPy array (e.g., from webcam or upload)
12
- gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
13
- processed_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB)
14
- return processed_image
15
- elif isinstance(image, Image.Image):
16
- # Handle PIL image
17
- image = np.array(image)
18
- gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
19
- processed_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB)
20
- return Image.fromarray(processed_image)
21
- else:
22
- return "Invalid image format"
23
-
24
- # Create the Gradio interface
 
 
 
 
 
 
 
25
  with gr.Blocks() as demo:
26
- gr.Markdown("# Image Capture and Processing App")
27
- gr.Markdown("Upload an image or capture one, and convert it to grayscale!")
28
 
29
  # Input section
30
  image_input = gr.Image(type="pil", label="Upload or Capture Image")
31
 
32
  # Output section
33
- output_image = gr.Image(type="pil", label="Processed Image")
34
 
35
- # Button to process the image
36
- process_button = gr.Button("Process Image")
37
 
38
- # Link button to the function
39
- process_button.click(process_image, inputs=[image_input], outputs=output_image)
40
 
41
  demo.launch()
 
 
1
  import cv2
2
+ import gradio as gr
3
+ from ultralytics import YOLO
4
  from PIL import Image
5
+ import numpy as np
6
+
7
# Load a pre-trained YOLOv8 nano model ('yolov8n.pt', trained on COCO).
# NOTE: despite the UI text mentioning YOLOv5, these are v8 weights.
model = YOLO('yolov8n.pt')  # Replace with a custom weights file if available
 
10
def detect_objects(image):
    """
    Detect objects in an image with the module-level YOLO model and
    annotate it with bounding boxes and class/confidence labels.

    Args:
        image: RGB PIL image from the Gradio input, or None when the
            user clicks the button without providing an image.

    Returns:
        Annotated PIL image, or None if no image was supplied.
    """
    # Gradio can invoke the handler with no image; don't crash on None.
    if image is None:
        return None

    # Convert the PIL image to a NumPy array. PIL gives RGB channel
    # order, and we never convert to BGR, so the array stays RGB.
    image_np = np.array(image)
    results = model(image_np)  # Perform detection

    # Draw bounding boxes for every detection in every result.
    for result in results:
        for box in result.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])  # Box corners as ints
            label = box.cls[0]        # Class index
            confidence = box.conf[0]  # Confidence score

            # Colors are interpreted in the array's RGB order:
            # green rectangle, red label text.
            cv2.rectangle(image_np, (x1, y1), (x2, y2), (0, 255, 0), 2)
            text = f"{model.names[int(label)]} ({confidence:.2f})"
            cv2.putText(image_np, text, (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # BUG FIX: the array is already RGB (it came from a PIL image), so the
    # previous cv2.cvtColor(..., cv2.COLOR_BGR2RGB) call swapped the red
    # and blue channels of the returned image. Return the array as-is.
    return Image.fromarray(image_np)
34
+
35
# Build the Gradio interface: one image input, one annotated image output,
# and a button wired to the detection function.
with gr.Blocks() as demo:
    gr.Markdown("# Suspicious Object Detection")
    # BUG FIX: the loaded weights are 'yolov8n.pt', so the UI text should
    # say YOLOv8 (the previous text incorrectly claimed YOLOv5).
    gr.Markdown("Upload an image or use your webcam to capture one. The app will detect objects using YOLOv8.")

    # Input section: accepts an upload or a webcam capture as a PIL image.
    image_input = gr.Image(type="pil", label="Upload or Capture Image")

    # Output section: shows the image with bounding boxes drawn on it.
    output_image = gr.Image(type="pil", label="Processed Image with Annotations")

    # Button for detection
    detect_button = gr.Button("Detect Suspicious Objects")

    # Link the button to the detection function
    detect_button.click(detect_objects, inputs=[image_input], outputs=[output_image])

demo.launch()