Ivan Murabito committed on
Commit
aa13994
·
1 Parent(s): a1c9872

add tabbed interface

Browse files
yologp/frame_extractor_gradio_app.py CHANGED
@@ -1,7 +1,9 @@
 
1
  from pytube import YouTube
2
  import gradio as gr
3
  from pathlib import Path
4
  import os
 
5
  from supervision import (
6
  ImageSink,
7
  get_video_frames_generator,
@@ -10,11 +12,24 @@ from supervision import (
10
  from tqdm import tqdm
11
  from helpers import zoom_center
12
  import shutil
 
13
 
14
  data_path = Path(__file__).parent.parent / "data"
15
  print("DATA PATH: ", data_path)
16
 
17
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  def download_youtube_url(url, out_dir) -> str:
19
  yt = YouTube(url=url)
20
  files = yt.streams.filter(file_extension="mp4", only_video=True)
@@ -31,6 +46,7 @@ def extract_frames(
31
  start,
32
  end,
33
  resize_w,
 
34
  zoom,
35
  progress=gr.Progress(track_tqdm=True),
36
  ):
@@ -44,19 +60,25 @@ def extract_frames(
44
  video_name = str(v_path.stem).replace(" ", "")
45
  target_dir = Path(f"{data_path}/{video_name}_frames")
46
  cont = 0
47
- with ImageSink(
48
  target_dir_path=target_dir,
49
  image_name_pattern="image_{:05d}.jpg",
50
  overwrite=True,
51
  ) as sink:
52
  for image in tqdm(
53
  get_video_frames_generator(
54
- source_path=str(v_path), stride=stride, start=start
 
 
 
55
  )
56
  ):
57
  if zoom > 1:
58
  image = zoom_center(img=image.copy(), zoom_factor=zoom)
59
- sink.save_image(image=image.copy())
 
 
 
60
  cont += 1
61
  progress(0.8, "Zipping..")
62
  print("Target_dir", target_dir)
@@ -81,17 +103,18 @@ inputs = [
81
  gr.Number(label="Start Frame", value=0),
82
  gr.Number(label="End Frame", value=-1),
83
  gr.Number(label="Resize Width (px)", value=-1),
 
84
  gr.Slider(label="Image Zoom", minimum=1.0, maximum=2.99, value=1.4),
85
  ]
86
  outputs = [gr.Gallery(label="preview"), gr.File()]
87
- interface = gr.Interface(
88
  fn=extract_frames,
89
  inputs=inputs,
90
  outputs=outputs,
91
  examples=[["https://www.youtube.com/watch?v=XDhjS_fzhsQ"]],
92
- allow_flagging=False,
93
  )
94
 
95
 
96
  if __name__ == "__main__":
97
- interface.queue(max_size=10).launch(server_name="0.0.0.0")
 
1
+ from typing import Optional
2
  from pytube import YouTube
3
  import gradio as gr
4
  from pathlib import Path
5
  import os
6
+ import cv2
7
  from supervision import (
8
  ImageSink,
9
  get_video_frames_generator,
 
12
  from tqdm import tqdm
13
  from helpers import zoom_center
14
  import shutil
15
+ import numpy as np
16
 
17
  data_path = Path(__file__).parent.parent / "data"
18
  print("DATA PATH: ", data_path)
19
 
20
 
21
class MyImageSink(ImageSink):
    """ImageSink subclass that writes frames as JPEGs with a configurable quality.

    The upstream `supervision.ImageSink.save_image` has no quality knob; this
    override saves via `cv2.imwrite` with `IMWRITE_JPEG_QUALITY` instead.
    """

    def save_image(
        self, image: np.ndarray, image_name: Optional[str] = None, quality: int = 70
    ):
        """Save `image` into the sink's target directory.

        Args:
            image: frame to write (BGR ndarray, as produced by the video reader).
            image_name: explicit file name; when None, the sink's numbered
                `image_name_pattern` is used and the counter advances.
            quality: JPEG quality, 0-100. The Gradio slider delivers a float,
                so it is coerced to int — cv2 requires an int flag value.
        """
        if image_name is None:
            image_name = self.image_name_pattern.format(self.image_count)

        image_path = os.path.join(self.target_dir_path, image_name)
        cv2.imwrite(image_path, image, [cv2.IMWRITE_JPEG_QUALITY, int(quality)])
        self.image_count += 1
32
+
33
  def download_youtube_url(url, out_dir) -> str:
34
  yt = YouTube(url=url)
35
  files = yt.streams.filter(file_extension="mp4", only_video=True)
 
46
  start,
47
  end,
48
  resize_w,
49
+ quality,
50
  zoom,
51
  progress=gr.Progress(track_tqdm=True),
52
  ):
 
60
  video_name = str(v_path.stem).replace(" ", "")
61
  target_dir = Path(f"{data_path}/{video_name}_frames")
62
  cont = 0
63
+ with MyImageSink(
64
  target_dir_path=target_dir,
65
  image_name_pattern="image_{:05d}.jpg",
66
  overwrite=True,
67
  ) as sink:
68
  for image in tqdm(
69
  get_video_frames_generator(
70
+ source_path=str(v_path),
71
+ stride=stride,
72
+ start=start,
73
+ end=end if end != -1 else None,
74
  )
75
  ):
76
  if zoom > 1:
77
  image = zoom_center(img=image.copy(), zoom_factor=zoom)
78
+ sink.save_image(
79
+ image=image.copy(),
80
+ quality=quality,
81
+ )
82
  cont += 1
83
  progress(0.8, "Zipping..")
84
  print("Target_dir", target_dir)
 
103
  gr.Number(label="Start Frame", value=0),
104
  gr.Number(label="End Frame", value=-1),
105
  gr.Number(label="Resize Width (px)", value=-1),
106
+ gr.Slider(label="Quality", minimum=0, maximum=100, value=70),
107
  gr.Slider(label="Image Zoom", minimum=1.0, maximum=2.99, value=1.4),
108
  ]
outputs = [gr.Gallery(label="preview"), gr.File()]
# Exposed at module level so gradio_app.py can mount this interface in a
# TabbedInterface without launching it here.
frame_ext_interface = gr.Interface(
    fn=extract_frames,
    inputs=inputs,
    outputs=outputs,
    examples=[["https://www.youtube.com/watch?v=XDhjS_fzhsQ"]],
    # String form replaces the deprecated boolean `allow_flagging=False`.
    allow_flagging="never",
)


if __name__ == "__main__":
    # Standalone entry point: bound queue, listen on all interfaces
    # (container-friendly).
    frame_ext_interface.queue(max_size=10).launch(server_name="0.0.0.0")
yologp/gradio_app.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr

# Both sub-apps expose their gr.Interface at module level (behind a
# __main__ guard), so importing them here does not start a server.
from frame_extractor_gradio_app import frame_ext_interface
from inference_gradio_app import inference_interface


# Single combined UI: one tab per tool.
tabbed_interface = gr.TabbedInterface(
    interface_list=[inference_interface, frame_ext_interface],
    tab_names=["Inference", "Extract Frame"],
)

if __name__ == "__main__":
    # Bound queue; bind on 0.0.0.0 so the app is reachable from outside a
    # container.
    tabbed_interface.queue(max_size=10).launch(server_name="0.0.0.0")
yologp/inference_gradio_app.py CHANGED
@@ -40,7 +40,7 @@ def inference(image, conf: float, iou: float, progress=gr.Progress()):
40
  return frame
41
 
42
 
43
- with gr.Blocks() as inference_app:
44
  gr.Markdown("# 🏍️ YoloGP: Motogp tracker")
45
  with gr.Row():
46
  with gr.Column():
@@ -60,7 +60,26 @@ with gr.Blocks() as inference_app:
60
  with gr.Column():
61
  output_im = gr.Image()
62
 
63
- button.click(fn=inference, inputs=[image, conf, iou], outputs=output_im)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  if __name__ == "__main__":
66
- inference_app.queue(max_size=10).launch(server_name="0.0.0.0")
 
 
40
  return frame
41
 
42
 
43
+ """ with gr.Blocks() as inference_app:
44
  gr.Markdown("# 🏍️ YoloGP: Motogp tracker")
45
  with gr.Row():
46
  with gr.Column():
 
60
  with gr.Column():
61
  output_im = gr.Image()
62
 
63
+ button.click(fn=inference, inputs=[image, conf, iou], outputs=output_im) """
64
+
65
+
66
# Exposed at module level so gradio_app.py can mount this interface in a
# TabbedInterface without launching it here.
inference_interface = gr.Interface(
    description="# 🏍️ YoloGP: Motogp tracker (YoloV8 nano, detection & segmentation)",
    fn=inference,
    inputs=[
        gr.Image(),
        gr.Slider(label="Confidence", minimum=0, maximum=0.99, value=0.3),
        gr.Slider(label="IoU", minimum=0, maximum=0.99, value=0.45),
    ],
    outputs=[gr.Image()],
    examples=[
        ["./assets/Rossi_Lorenzo_Catalunya2009.png"],
        ["./assets/sample1.png"],
    ],
    allow_flagging="never",
)


if __name__ == "__main__":
    # max_size keeps the queue bounded, consistent with the other entry points
    # (frame_extractor_gradio_app.py and gradio_app.py both use max_size=10).
    inference_interface.queue(max_size=10).launch(server_name="0.0.0.0")