Shatha2030 committed on
Commit
9a4530a
·
verified ·
1 Parent(s): 1eabc98

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -125
app.py CHANGED
@@ -1,159 +1,83 @@
1
  import gradio as gr
2
  from ultralytics import YOLO
3
- from PIL import Image
4
  import cv2
5
  import numpy as np
6
  import tempfile
7
- import os
8
- from pathlib import Path
9
 
10
- # Load the YOLOv8 model
11
  model = YOLO('yolov8n.pt')
12
 
13
  def process_image(image):
14
- """
15
- Process a single image for object detection
16
- """
17
  results = model(image)
18
- # Get detection information
19
  boxes = results[0].boxes
20
- detection_info = []
21
- for box in boxes:
22
- class_id = int(box.cls[0])
23
- class_name = results[0].names[class_id]
24
- confidence = float(box.conf[0])
25
- detection_info.append(f"{class_name}: {confidence:.2%}")
26
-
27
  return Image.fromarray(results[0].plot()), "\n".join(detection_info)
28
 
29
  def process_video(video_path):
30
- """
31
- Process video for object detection
32
- """
33
- with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
34
- output_path = temp_file.name
35
 
36
  cap = cv2.VideoCapture(video_path)
37
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
38
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
39
- fps = int(cap.get(cv2.CAP_PROP_FPS))
40
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
41
-
42
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
43
- out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
44
 
 
45
  detection_summary = []
46
- frame_count = 0
47
 
48
- try:
49
- while cap.isOpened():
50
- ret, frame = cap.read()
51
- if not ret:
52
- break
53
-
54
- frame_count += 1
55
- results = model(frame)
56
 
57
- # Collect detection information for this frame
58
- if frame_count % int(fps) == 0: # Sample every second
59
- for box in results[0].boxes:
60
- class_id = int(box.cls[0])
61
- class_name = results[0].names[class_id]
62
- detection_summary.append(class_name)
63
-
64
- annotated_frame = results[0].plot()
65
- out.write(annotated_frame)
66
-
67
- finally:
68
- cap.release()
69
- out.release()
70
-
71
- # Create summary of detected objects
72
- if detection_summary:
73
- from collections import Counter
74
- counts = Counter(detection_summary)
75
- summary = "\n".join([f"{obj}: {count} occurrences" for obj, count in counts.most_common()])
76
- else:
77
- summary = "No objects detected"
78
-
79
  return output_path, summary
80
 
81
  def detect_objects(media):
82
- """
83
- Unified function to handle both image and video inputs
84
- """
85
  if media is None:
86
- return None, None, None, "Please upload an image or video to begin detection.", gr.update(visible=True), gr.update(visible=False)
87
-
88
  try:
89
  if isinstance(media, str) and media.lower().endswith(('.mp4', '.avi', '.mov')):
90
  output_video, detection_summary = process_video(media)
91
- return (None, output_video, detection_summary,
92
- "✅ Video processing complete! Check the detection summary below.",
93
- gr.update(visible=False), gr.update(visible=True))
94
  else:
95
- if isinstance(media, str):
96
- image = cv2.imread(media)
97
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
98
- else:
99
- image = media
100
  processed_image, detection_info = process_image(image)
101
- return (processed_image, None, detection_info,
102
- "✅ Image processing complete! Check the detections below.",
103
- gr.update(visible=True), gr.update(visible=False))
104
  except Exception as e:
105
- return None, None, None, f"❌ Error: {str(e)}", gr.update(visible=False), gr.update(visible=False)
106
 
 
 
 
107
 
108
- # Create Gradio interface
109
- with gr.Blocks(css=custom_css) as demo:
110
- with gr.Column(elem_id="app-container"):
111
-
112
- gr.Markdown("# 🔍 Object Detection")
113
-
114
- # Upload Section
115
- with gr.Column(elem_classes="upload-box"):
116
- gr.Markdown("### 📤 Upload your file")
117
- input_media = gr.File(
118
- label="Drag and drop or click to upload (Images: jpg, jpeg, png | Videos: mp4, avi, mov)",
119
- file_types=["image", "video"]
120
- )
121
-
122
- # Status Message
123
- status_text = gr.Textbox(
124
- label="Status",
125
- value="Waiting for upload...",
126
- interactive=False
127
- )
128
-
129
- # Detection Information
130
- detection_info = gr.Textbox(
131
- label="Detection Results",
132
- elem_classes="detection-info",
133
- interactive=False
134
- )
135
-
136
- # Results Section
137
- with gr.Column(elem_classes="results-container"):
138
- with gr.Row():
139
- with gr.Column(visible=False) as image_column:
140
- output_image = gr.Image(label="Detected Objects")
141
- with gr.Column(visible=False) as video_column:
142
- output_video = gr.Video(label="Processed Video")
143
-
144
- # Handle file upload
145
- input_media.upload(
146
- fn=detect_objects,
147
- inputs=[input_media],
148
- outputs=[
149
- output_image,
150
- output_video,
151
- detection_info,
152
- status_text,
153
- image_column,
154
- video_column
155
- ]
156
- )
157
 
 
158
  if __name__ == "__main__":
159
- demo.launch(share=True)
 
1
  import gradio as gr
2
  from ultralytics import YOLO
 
3
  import cv2
4
  import numpy as np
5
  import tempfile
6
+ from PIL import Image
7
+ from collections import Counter
8
 
9
# Load the YOLOv8 nano model once at import time; it is shared by all requests.
model = YOLO('yolov8n.pt')
11
 
12
def process_image(image):
    """Run YOLO object detection on a single image.

    Args:
        image: An image in any form the YOLO model accepts
            (e.g. an RGB numpy array or a file path).

    Returns:
        tuple: (annotated PIL.Image with boxes drawn, newline-separated
        "class_name: confidence%" string, one line per detection).
    """
    result = model(image)[0]
    labels = []
    for box in result.boxes:
        class_name = result.names[int(box.cls[0])]
        confidence = float(box.conf[0])
        labels.append(f"{class_name}: {confidence:.2%}")
    return Image.fromarray(result.plot()), "\n".join(labels)
18
 
19
def process_video(video_path):
    """Run YOLO object detection on every frame of a video.

    Writes an annotated copy of the video to a temporary .mp4 file and
    samples detected class names roughly once per second for a summary.

    Args:
        video_path: Path to the input video file.

    Returns:
        tuple: (path to the annotated .mp4 file, summary string of
        "class: N occurrences" lines, or "No objects detected").
    """
    # Close the handle immediately; VideoWriter reopens the path itself.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
        output_path = temp_file.name

    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report FPS as 0; fall back to 30 so the writer gets a
    # valid rate and the once-per-second sampling below never divides by zero.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    detection_summary = []

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            results = model(frame)
            # Sample class names about once per second of video.
            if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) % fps == 0:
                detection_summary.extend(
                    results[0].names[int(box.cls[0])] for box in results[0].boxes
                )

            out.write(results[0].plot())
    finally:
        # Always release OS handles, even if inference fails mid-video.
        cap.release()
        out.release()

    if detection_summary:
        counts = Counter(detection_summary)
        summary = "\n".join(f"{obj}: {count} occurrences" for obj, count in counts.most_common())
    else:
        summary = "No objects detected"

    return output_path, summary
47
 
48
def detect_objects(media):
    """Dispatch an uploaded file to image or video detection.

    Args:
        media: Path to the uploaded file, or an already-loaded image array.

    Returns:
        6-tuple matching the upload handler's outputs: (image result,
        video result, detection text, status message, image-column
        visibility update, video-column visibility update).
    """
    if media is None:
        # Must return exactly six values — one per wired output component.
        return (None, None, None, "Please upload an image or video.",
                gr.update(visible=True), gr.update(visible=False))

    try:
        if isinstance(media, str) and media.lower().endswith(('.mp4', '.avi', '.mov')):
            output_video, detection_summary = process_video(media)
            return (None, output_video, detection_summary, "✅ Video processing complete!",
                    gr.update(visible=False), gr.update(visible=True))
        else:
            if isinstance(media, str):
                image = cv2.imread(media)
                if image is None:
                    raise ValueError(f"Could not read image file: {media}")
                # OpenCV loads BGR; convert only images we read ourselves.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            else:
                image = media
            processed_image, detection_info = process_image(image)
            return (processed_image, None, detection_info, "✅ Image processing complete!",
                    gr.update(visible=True), gr.update(visible=False))
    except Exception as e:
        # Six values here too, otherwise Gradio raises its own count error
        # and the user never sees this message.
        return (None, None, None, f"❌ Error: {str(e)}",
                gr.update(visible=False), gr.update(visible=False))
64
 
65
# Build the Gradio interface.
with gr.Blocks() as demo:
    gr.Markdown("# 🔍 Object Detection")

    # Single upload entry point for both images and videos.
    input_media = gr.File(label="Upload Image/Video (jpg, png, mp4, avi)", file_types=["image", "video"])
    status_text = gr.Textbox(label="Status", value="Waiting for upload...", interactive=False)
    detection_info = gr.Textbox(label="Detection Results", interactive=False)

    with gr.Row():
        # Only one of these columns is visible at a time; detect_objects
        # toggles them via gr.update(visible=...).
        with gr.Column(visible=False) as image_column:
            output_image = gr.Image(label="Detected Objects")
        with gr.Column(visible=False) as video_column:
            output_video = gr.Video(label="Processed Video")

    # detect_objects must return exactly one value per output listed here.
    input_media.upload(detect_objects, inputs=[input_media], outputs=[output_image, output_video, detection_info, status_text, image_column, video_column])

# Run the app.
if __name__ == "__main__":
    demo.launch(share=True)