Nawinkumar15 commited on
Commit
4da2efa
·
verified ·
1 Parent(s): c5609c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -24
app.py CHANGED
@@ -1,36 +1,33 @@
1
  import gradio as gr
2
  from ultralytics import YOLO
3
- import tempfile
4
  import cv2
 
 
5
 
6
  # Load YOLOv8 model once
7
- model = YOLO("best.pt")
8
 
9
- # Inference on image
10
  def predict_image(image):
11
- results = model.predict(image)
12
  return results[0].plot()
13
 
14
- # Inference on video
15
  def predict_video(video_path):
16
- # OpenCV video capture
17
  cap = cv2.VideoCapture(video_path)
18
  fps = cap.get(cv2.CAP_PROP_FPS)
19
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2 # Resize for performance
20
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2
21
 
22
- # Output video
23
  temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
24
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
25
- out = cv2.VideoWriter(temp_output.name, fourcc, fps, (width, height))
26
 
27
  while True:
28
  ret, frame = cap.read()
29
  if not ret:
30
  break
31
-
32
- frame = cv2.resize(frame, (width, height)) # Resize to speed up inference
33
- results = model.predict(frame, imgsz=480, conf=0.5, verbose=False) # Lower imgsz = faster
34
  annotated = results[0].plot()
35
  out.write(annotated)
36
 
@@ -38,23 +35,39 @@ def predict_video(video_path):
38
  out.release()
39
  return temp_output.name
40
 
41
- # Gradio Interface
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  with gr.Blocks() as demo:
43
- gr.Markdown("# ๐Ÿš€ Optimized YOLOv8 Detection\nFast & Accurate on Images and Videos")
 
44
 
45
- with gr.Tab("Image"):
46
  img_input = gr.Image(type="pil")
47
- img_output = gr.Image(label="Detected")
48
- img_btn = gr.Button("Run Detection")
49
  img_btn.click(predict_image, inputs=img_input, outputs=img_output)
50
 
51
- with gr.Tab("Video"):
52
  vid_input = gr.Video()
53
  vid_output = gr.Video()
54
- vid_btn = gr.Button("Run Detection on Video")
55
  vid_btn.click(predict_video, inputs=vid_input, outputs=vid_output)
56
 
57
- demo.launch()
58
-
59
-
 
60
 
 
 
1
  import gradio as gr
2
  from ultralytics import YOLO
 
3
  import cv2
4
+ import tempfile
5
+ import numpy as np
6
 
7
# Load the YOLOv8 model once at import time so every request reuses the
# same weights instead of reloading them per call.
# NOTE(review): assumes "best.pt" sits in the app's working directory — confirm deployment layout.
model = YOLO("best.pt")
9
 
10
# Single-image inference for the "Image Detection" tab.
def predict_image(image):
    """Run YOLOv8 detection on one uploaded image.

    Args:
        image: PIL image supplied by the Gradio image input.

    Returns:
        The first prediction result rendered as an annotated frame
        (whatever ``results[0].plot()`` produces).
    """
    detections = model.predict(image, imgsz=320, conf=0.4, verbose=False)
    annotated = detections[0].plot()
    return annotated
14
 
15
# Video-file inference for the "Video Detection" tab.
def predict_video(video_path):
    """Run YOLOv8 detection on every frame of a video file.

    Args:
        video_path: Path to the uploaded video (Gradio passes a filepath).

    Returns:
        Path to a temporary .mp4 containing the annotated frames.

    The output is written at half the source resolution to speed up
    inference and encoding.
    """
    cap = cv2.VideoCapture(video_path)
    # Bug fix: some containers report 0 FPS, which yields a broken writer —
    # fall back to a sane default.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) // 2)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) // 2)

    # delete=False so the file survives for Gradio to serve after we return.
    temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    out = cv2.VideoWriter(temp_output.name, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.resize(frame, (width, height))
            results = model.predict(frame, imgsz=320, conf=0.4, verbose=False)
            annotated = results[0].plot()
            out.write(annotated)
    finally:
        # Bug fix: the capture was never released, leaking the file handle
        # on every call; release both resources even if inference raises.
        cap.release()
        out.release()
    return temp_output.name
37
 
38
# Live webcam inference for the "Live Webcam" tab.
def live_feed():
    """Yield annotated webcam frames until the camera stops delivering.

    Yields:
        RGB numpy arrays with YOLOv8 detections drawn on them, sized 640x480.

    Opens device 0 (the default system webcam); on a headless host such as
    Hugging Face Spaces the capture simply fails and the generator ends.
    """
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = cv2.resize(frame, (640, 480))
            results = model.predict(frame, imgsz=320, conf=0.4, verbose=False)
            annotated = results[0].plot()
            # Bug fix: OpenCV (and .plot()) produce BGR frames, but Gradio's
            # Image component expects RGB — without this the colors are swapped.
            yield cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    finally:
        # Bug fix: release the camera even when the consumer abandons the
        # generator mid-stream (the old release was only reached after break).
        cap.release()
50
+
51
# Gradio application: three tabs wiring the inference functions to the UI.
with gr.Blocks() as demo:
    gr.Markdown("# ๐Ÿ” YOLOv8 Object Detection")
    gr.Markdown("Upload an image or video, or use your webcam for real-time detection.")

    # Tab 1: single-image detection.
    with gr.Tab("๐Ÿ–ผ๏ธ Image Detection"):
        image_in = gr.Image(type="pil")
        image_out = gr.Image()
        detect_image_btn = gr.Button("Detect Objects")
        detect_image_btn.click(predict_image, inputs=image_in, outputs=image_out)

    # Tab 2: whole-video detection.
    with gr.Tab("๐ŸŽž๏ธ Video Detection"):
        video_in = gr.Video()
        video_out = gr.Video()
        detect_video_btn = gr.Button("Detect in Video")
        detect_video_btn.click(predict_video, inputs=video_in, outputs=video_out)

    # Tab 3: live webcam stream (may be unavailable on hosted Spaces).
    with gr.Tab("๐Ÿ“ท Live Webcam"):
        gr.Markdown("Note: Webcam streaming may not work on Hugging Face Spaces.")
        webcam_view = gr.Image(streaming=True)
        webcam_view.stream(live_feed)

demo.launch()