dead-shadow-7 committed on
Commit
c58389b
·
1 Parent(s): 4fce38b

Revert changes

Browse files
Files changed (1) hide show
  1. app.py +18 -61
app.py CHANGED
@@ -2,78 +2,35 @@ import torch
2
  import gradio as gr
3
  from PIL import Image
4
  import io
5
- import cv2
6
- import numpy as np
7
- import tempfile
8
 
9
# Load the custom YOLOv5 weights ('fire.pt') via torch.hub.
# trust_repo=True suppresses torch.hub's untrusted-repository confirmation
# prompt so the app can start non-interactively.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='fire.pt', trust_repo=True) # Load custom model
12
def detect_objects(image):
    """Run YOLOv5 inference on one image and return the annotated PIL image."""
    predictions = model(image)
    # render() draws the detection boxes and returns a list of numpy
    # arrays (one per input image); we only ever pass a single image.
    annotated = predictions.render()[0]
    return Image.fromarray(annotated)
19
 
20
def process_video(video_path):
    """Annotate every frame of a video with YOLOv5 detections.

    Writes the annotated frames to a temporary .mp4 file and returns
    that file's path (Gradio serves the video from the path).
    """
    reader = cv2.VideoCapture(video_path)

    # Mirror the source video's frame rate and geometry in the output.
    fps = reader.get(cv2.CAP_PROP_FPS)
    size = (int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    # delete=False: the file must outlive this function so Gradio can read it.
    sink = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
    writer = cv2.VideoWriter(sink.name, cv2.VideoWriter_fourcc(*'mp4v'), fps, size)

    while True:
        ok, bgr_frame = reader.read()
        if not ok:
            break
        # OpenCV delivers BGR; the model is fed RGB, then the rendered
        # RGB result is converted back to BGR for VideoWriter.
        detections = model(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
        annotated_rgb = detections.render()[0]
        writer.write(cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))

    reader.release()
    writer.release()
    return sink.name
59
 
60
# Gradio entry point: routes the upload to the matching processor.
def detect(input_image=None, input_video=None):
    """Return a (video_path, image) pair; video takes precedence over image."""
    if input_video is not None:
        # Video uploaded: processed video in slot 0, no image.
        return process_video(input_video), None
    if input_image is not None:
        # Image uploaded: annotated image in slot 1, no video.
        return None, detect_objects(input_image)
    # Nothing uploaded at all.
    return None, None
68
 
69
# Gradio interface: two optional inputs (image, video). The output order
# (video first, then image) matches the tuple returned by detect().
interface = gr.Interface(
    fn=detect,
    inputs=[gr.Image(type="pil", label="Upload Image"), gr.Video(label="Upload Video")],  # Two separate inputs
    outputs=[gr.Video(label="Processed Video"), gr.Image(type="pil", label="Processed Image")],
    title="YOLOv5 Image & Video Detection",
    description="Upload an image or video to detect objects using YOLOv5."
)

# Launch the Gradio app (blocks until the server is stopped).
interface.launch()
 
2
  import gradio as gr
3
  from PIL import Image
4
  import io
 
 
 
5
 
6
# Load the custom YOLOv5 weights ('fire.pt') via torch.hub.
# NOTE(review): trust_repo is left unset here (the prior revision passed
# trust_repo=True); on recent torch versions the hub may warn or prompt
# about the untrusted repository — confirm this is intended.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='fire.pt') # Load custom model
 
9
def detect_objects(image):
    """Run YOLOv5 on a PIL image and return the annotated image.

    Parameters
    ----------
    image : PIL.Image.Image
        Input image as supplied by the Gradio ``gr.Image(type="pil")`` input.

    Returns
    -------
    PIL.Image.Image
        A copy of the input with detection boxes rendered onto it.
    """
    # Run the YOLOv5 model
    results = model(image)
    # render() returns a list of annotated numpy arrays, one per input image.
    results_image = results.render()[0]
    # Convert the numpy array back to a PIL image for Gradio.
    # (The previous revision also encoded the image into an in-memory JPEG
    # via io.BytesIO but never used those bytes — that dead work is removed.)
    return Image.fromarray(results_image)
 
 
25
 
26
# Gradio interface: a single PIL-image input/output pair wired to
# detect_objects (image-only; no video path in this revision).
interface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="YOLOv5 Image Detection",
    description="Upload an image to detect objects using YOLOv5."
)

# Launch the Gradio app (blocks until the server is stopped).
interface.launch()