edeler committed on
Commit
c151ef0
·
verified ·
1 Parent(s): 2def4f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -29,7 +29,7 @@ model = YOLO(model_path).to(device)
29
 
30
  # Define the detection function for Gradio
31
  @spaces.GPU # Decorator to allocate GPU for ZeroGPU-enabled Spaces
32
- def detect_objects(image: np.ndarray) -> Image.Image:
33
  # Ensure the image is in BGR format if provided by PIL (Gradio gives us an RGB image)
34
  if image.shape[-1] == 3:
35
  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
@@ -57,6 +57,9 @@ def detect_objects(image: np.ndarray) -> Image.Image:
57
  # Apply Non-Maximum Suppression (NMS) to the detections to avoid duplicate boxes
58
  detections = detections.with_nms(threshold=NMS_THRESHOLD, class_agnostic=False)
59
 
 
 
 
60
  # Initialize an annotator for bounding boxes with specified color and thickness
61
  box_annotator = sv.OrientedBoxAnnotator(color=ANNOTATION_COLOR, thickness=ANNOTATION_THICKNESS)
62
 
@@ -65,11 +68,13 @@ def detect_objects(image: np.ndarray) -> Image.Image:
65
 
66
  # Convert annotated image to RGB for Gradio display (PIL expects RGB)
67
  annotated_img_rgb = cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB)
68
- return Image.fromarray(annotated_img_rgb)
 
 
69
 
70
  # Reset function for Gradio UI
71
  def gradio_reset():
72
- return gr.update(value=None), gr.update(value=None)
73
 
74
  # Set up Gradio interface
75
  with gr.Blocks() as demo:
@@ -83,6 +88,7 @@ with gr.Blocks() as demo:
83
 
84
  with gr.Column():
85
  output_img = gr.Image(label="Detection Result", interactive=False)
 
86
 
87
  # Add Examples section with images from the root directory
88
  with gr.Accordion("Select an Example Image"):
@@ -94,8 +100,8 @@ with gr.Blocks() as demo:
94
  )
95
 
96
  # Define button actions
97
- clear.click(gradio_reset, inputs=None, outputs=[input_img, output_img])
98
- predict.click(detect_objects, inputs=[input_img], outputs=[output_img])
99
 
100
  # Launch Gradio app
101
  demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
 
29
 
30
  # Define the detection function for Gradio
31
  @spaces.GPU # Decorator to allocate GPU for ZeroGPU-enabled Spaces
32
+ def detect_objects(image: np.ndarray) -> (Image.Image, str):
33
  # Ensure the image is in BGR format if provided by PIL (Gradio gives us an RGB image)
34
  if image.shape[-1] == 3:
35
  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
 
57
  # Apply Non-Maximum Suppression (NMS) to the detections to avoid duplicate boxes
58
  detections = detections.with_nms(threshold=NMS_THRESHOLD, class_agnostic=False)
59
 
60
+ # Count total detections after NMS
61
+ total_detections = len(detections)
62
+
63
  # Initialize an annotator for bounding boxes with specified color and thickness
64
  box_annotator = sv.OrientedBoxAnnotator(color=ANNOTATION_COLOR, thickness=ANNOTATION_THICKNESS)
65
 
 
68
 
69
  # Convert annotated image to RGB for Gradio display (PIL expects RGB)
70
  annotated_img_rgb = cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB)
71
+
72
+ # Return the annotated image and the total count of detections
73
+ return Image.fromarray(annotated_img_rgb), f"Total Detections: {total_detections}"
74
 
75
  # Reset function for Gradio UI
76
  def gradio_reset():
77
+ return gr.update(value=None), gr.update(value=None), gr.update(value="")
78
 
79
  # Set up Gradio interface
80
  with gr.Blocks() as demo:
 
88
 
89
  with gr.Column():
90
  output_img = gr.Image(label="Detection Result", interactive=False)
91
+ detection_count = gr.Textbox(label="Detection Summary", interactive=False)
92
 
93
  # Add Examples section with images from the root directory
94
  with gr.Accordion("Select an Example Image"):
 
100
  )
101
 
102
  # Define button actions
103
+ clear.click(gradio_reset, inputs=None, outputs=[input_img, output_img, detection_count])
104
+ predict.click(detect_objects, inputs=[input_img], outputs=[output_img, detection_count])
105
 
106
  # Launch Gradio app
107
  demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)