Ashutoshbk committed on
Commit
1d9fcca
·
1 Parent(s): f066199

video size limit added, up to 50 MB

Browse files
Files changed (2) hide show
  1. app.py +67 -39
  2. main.py +0 -71
app.py CHANGED
@@ -4,59 +4,87 @@ import os
4
  import tempfile
5
 
6
  from ultralytics import YOLO
7
- model = YOLO(r"best.pt") # initialize model
 
 
 
 
 
 
 
 
 
 
8
 
9
 
10
  def process_video(video_file):
 
 
 
 
 
11
  # Create a temporary folder to save extracted frames
12
  temp_dir = tempfile.mkdtemp()
13
-
14
  # Capture the video using OpenCV
15
  video = cv2.VideoCapture(video_file)
 
 
 
16
  fps = int(video.get(cv2.CAP_PROP_FPS))
17
- frame_rate = fps // 5 # Extract 5 frames per second
 
 
 
18
 
19
  frame_count = 0
20
  extracted_frames = []
21
 
22
- while True:
23
- ret, frame = video.read()
24
- if not ret:
25
- break
26
-
27
- # Extract every nth frame according to the frame_rate
28
- if frame_count % frame_rate == 0:
29
- # Resize the frame if it's larger than 1280x720
30
- h, w, _ = frame.shape
31
- if w > 1280 or h > 720:
32
- frame = cv2.resize(frame, (1280, 720))
33
-
34
- # Save the frame to the temporary directory
35
- frame_path = os.path.join(temp_dir, f"frame_{frame_count}.jpg")
36
- cv2.imwrite(frame_path, frame)
37
- results = model.predict(source=frame_path, conf=0.2, save=False, show=False, line_width=2, show_labels=True, show_boxes=True, retina_masks=True)
38
- for result in results:
39
- boxes = result.boxes
40
- # result.save(filename=frame_path)
41
- # Check if any detections (boxes or masks, keypoints, etc.) exist
42
- if boxes is not None and len(boxes) > 0: # If boxes are present
43
- # Save the result image if detections are found
44
- result.save(filename=frame_path)
45
- else:
46
- # If no detections, ensure the file doesn't exist
47
- if os.path.exists(frame_path):
48
- os.remove(frame_path) # Delete the image if it exists
49
-
50
- # Add the frame path to the list of extracted frames
51
- if os.path.exists(frame_path):
52
- extracted_frames.append(frame_path)
53
-
54
- frame_count += 1
55
-
56
- video.release()
 
 
 
 
 
 
57
 
58
  # Return the list of extracted frame paths for display
59
- return extracted_frames
 
60
 
61
  # Define the Gradio interface
62
  iface = gr.Interface(
 
4
  import tempfile
5
 
6
  from ultralytics import YOLO
7
+
8
+ # Initialize YOLO model
9
+ model_path = r"best.pt"
10
+ if not os.path.exists(model_path):
11
+ raise FileNotFoundError(f"Model file not found at {model_path}")
12
+ model = YOLO(model_path)
13
+
14
+ # Define the maximum file size (100MB)
15
+ MAX_FILE_SIZE_MB = 50 # Limit is set to 100MB
16
+ MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024 # Convert MB to bytes
17
+
18
 
19
 
20
  def process_video(video_file):
21
+ # Check the size of the uploaded video
22
+ video_size = os.path.getsize(video_file)
23
+ if video_size > MAX_FILE_SIZE_BYTES:
24
+ return f"Error: The uploaded video exceeds the 100MB size limit. Please upload a smaller video."
25
+
26
  # Create a temporary folder to save extracted frames
27
  temp_dir = tempfile.mkdtemp()
28
+
29
  # Capture the video using OpenCV
30
  video = cv2.VideoCapture(video_file)
31
+ if not video.isOpened():
32
+ return f"Error: Unable to open video file {video_file}"
33
+
34
  fps = int(video.get(cv2.CAP_PROP_FPS))
35
+ if fps == 0:
36
+ return "Error: Video FPS is 0 or not detected."
37
+
38
+ frame_rate = max(1, fps // 5) # Extract 5 frames per second, ensure at least 1 frame/sec
39
 
40
  frame_count = 0
41
  extracted_frames = []
42
 
43
+ try:
44
+ while True:
45
+ ret, frame = video.read()
46
+ if not ret:
47
+ break
48
+
49
+ # Extract every nth frame according to the frame_rate
50
+ if frame_count % frame_rate == 0:
51
+ # Resize the frame if it's larger than 1280x720
52
+ h, w, _ = frame.shape
53
+ if w > 1280 or h > 720:
54
+ frame = cv2.resize(frame, (1280, 720))
55
+
56
+ # Save the frame to the temporary directory
57
+ frame_path = os.path.join(temp_dir, f"frame_{frame_count}.jpg")
58
+ cv2.imwrite(frame_path, frame)
59
+
60
+ # Perform detection using the YOLO model
61
+ results = model.predict(source=frame_path, conf=0.002, save=True, show=False,
62
+ line_width=2, show_labels=True, show_boxes=True, retina_masks=True)
63
+
64
+ for result in results:
65
+ boxes = result.boxes
66
+ if boxes is not None and len(boxes) > 0:
67
+ # Save the result image if detections are found
68
+ result.save(filename=frame_path)
69
+ else:
70
+ # If no detections, delete the frame
71
+ if os.path.exists(frame_path):
72
+ os.remove(frame_path)
73
+
74
+ # Add the frame path to the list of extracted frames if it still exists
75
+ if os.path.exists(frame_path):
76
+ extracted_frames.append(frame_path)
77
+
78
+ frame_count += 1
79
+ finally:
80
+ video.release()
81
+ # Clean up temp directory after processing
82
+ if len(extracted_frames) == 0:
83
+ shutil.rmtree(temp_dir)
84
 
85
  # Return the list of extracted frame paths for display
86
+ return extracted_frames if extracted_frames else ["No frames extracted or no detections found."]
87
+
88
 
89
  # Define the Gradio interface
90
  iface = gr.Interface(
main.py DELETED
@@ -1,71 +0,0 @@
1
- import gradio as gr
2
- import cv2
3
- import os
4
- import tempfile
5
-
6
- from ultralytics import YOLO
7
- model = YOLO(r"best.pt") # initialize model
8
-
9
-
10
- def process_video(video_file):
11
- # Create a temporary folder to save extracted frames
12
- temp_dir = tempfile.mkdtemp()
13
-
14
- # Capture the video using OpenCV
15
- video = cv2.VideoCapture(video_file)
16
- fps = int(video.get(cv2.CAP_PROP_FPS))
17
- frame_rate = fps // 5 # Extract 5 frames per second
18
-
19
- frame_count = 0
20
- extracted_frames = []
21
-
22
- while True:
23
- ret, frame = video.read()
24
- if not ret:
25
- break
26
-
27
- # Extract every nth frame according to the frame_rate
28
- if frame_count % frame_rate == 0:
29
- # Resize the frame if it's larger than 1280x720
30
- h, w, _ = frame.shape
31
- if w > 1280 or h > 720:
32
- frame = cv2.resize(frame, (1280, 720))
33
-
34
- # Save the frame to the temporary directory
35
- frame_path = os.path.join(temp_dir, f"frame_{frame_count}.jpg")
36
- cv2.imwrite(frame_path, frame)
37
- results = model.predict(source=frame_path, conf=0.002, save=False, show=False, line_width=2, show_labels=True, show_boxes=True, retina_masks=True)
38
- for result in results:
39
- boxes = result.boxes
40
- # result.save(filename=frame_path)
41
- # Check if any detections (boxes or masks, keypoints, etc.) exist
42
- if boxes is not None and len(boxes) > 0: # If boxes are present
43
- # Save the result image if detections are found
44
- result.save(filename=frame_path)
45
- else:
46
- # If no detections, ensure the file doesn't exist
47
- if os.path.exists(frame_path):
48
- os.remove(frame_path) # Delete the image if it exists
49
-
50
- # Add the frame path to the list of extracted frames
51
- if os.path.exists(frame_path):
52
- extracted_frames.append(frame_path)
53
-
54
- frame_count += 1
55
-
56
- video.release()
57
-
58
- # Return the list of extracted frame paths for display
59
- return extracted_frames
60
-
61
- # Define the Gradio interface
62
- iface = gr.Interface(
63
- fn=process_video, # Function to process the video and return extracted frames
64
- inputs=gr.Video(label="Upload your video"), # Video upload input
65
- outputs=gr.Gallery(label="Extracted Frames"), # Display extracted frames in a gallery
66
- title="Video Frame Extraction", # Interface title
67
- description="Upload a video, extract 5 frames per second, resize if necessary, and display extracted frames."
68
- )
69
-
70
- # Launch the Gradio app
71
- iface.launch()