miojizzy committed on
Commit
b42cdae
·
1 Parent(s): 460bbce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -25
app.py CHANGED
@@ -17,15 +17,24 @@ def video_change(v, vc):
17
  fms = vc.get(cv.CAP_PROP_FRAME_COUNT)
18
  w = vc.get(cv.CAP_PROP_FRAME_WIDTH)
19
  h = vc.get(cv.CAP_PROP_FRAME_HEIGHT)
20
- return fps, fms, w, h, gr.update(maximum=int(fms)-1), vc
21
-
22
- def get_nth_frame(vc, n):
 
 
 
 
 
 
 
 
 
23
  vc.set(cv.CAP_PROP_POS_FRAMES, n)
24
  ok, img = vc.read()
25
  img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
26
- return img
27
 
28
- def make_dataset(vc, df):
29
  path = os.path.dirname(__file__)+"/"+str(uuid.uuid1())
30
  os.mkdir(path)
31
  os.mkdir(path+"/train")
@@ -42,7 +51,7 @@ def make_dataset(vc, df):
42
  if not ok:
43
  break
44
  name = "image_%08d.jpg"%(idx)
45
- cv.imwrite(path+"/train/"+name, img)
46
  zf.write(path+"/train/"+name, arcname="train/"+name)
47
  os.remove(path+"/train/"+name)
48
  label_df = pd.concat([label_df, pd.DataFrame([[name, label]], columns=headers)])
@@ -53,31 +62,45 @@ def make_dataset(vc, df):
53
 
54
  with gr.Blocks() as demo:
55
  with gr.Tab("Create Image Classify Dataset"):
56
- with gr.Row():
57
- with gr.Column():
58
- state_vc = gr.State(value=None)
59
- input_video = gr.Video()
60
- with gr.Accordion('Video Info'):
61
- output_fps = gr.Textbox(lines=1, label="fps")
62
- output_fms = gr.Textbox(lines=1, label="frame count")
63
- output_w = gr.Textbox(lines=1, label="width")
64
- output_h = gr.Textbox(lines=1, label="height")
65
- with gr.Column():
66
- output_img = gr.Image()
67
- with gr.Accordion('Show Frame'):
68
- input_n = gr.Slider(0, 9999, value=0, step=1, label="n")
 
 
 
 
 
 
 
 
 
69
  btn = gr.Button(value="Submit")
 
 
70
 
71
- input_video.change(
72
  video_change,
73
  inputs=[input_video, state_vc],
74
- outputs=[output_fps, output_fms, output_w, output_h, input_n, state_vc]
75
- )
76
- btn.click(get_nth_frame, inputs=[state_vc, input_n], outputs=output_img)
 
 
 
 
77
 
78
  gr.Markdown("*****")
79
 
80
-
81
  with gr.Row():
82
  df = gr.Dataframe(
83
  headers=["start_index", "end_index", "label"],
@@ -86,10 +109,14 @@ with gr.Blocks() as demo:
86
  interactive=True,
87
  )
88
  file = gr.File(file_count='multiple')
 
89
  btn_make_dataset = gr.Button(value="make dataset")
90
- btn_make_dataset.click(make_dataset, inputs=[state_vc, df], outputs=file)
 
 
91
 
92
  gr.Markdown("*****")
 
93
  gr.Examples([os.path.join(os.path.dirname(__file__), "test.mp4")], inputs=input_video)
94
 
95
 
 
17
  fms = vc.get(cv.CAP_PROP_FRAME_COUNT)
18
  w = vc.get(cv.CAP_PROP_FRAME_WIDTH)
19
  h = vc.get(cv.CAP_PROP_FRAME_HEIGHT)
20
+ return [fps, fms, w, h,
21
+ gr.update(maximum=int(fms)-1),
22
+ gr.update(maximum=int(w)),
23
+ gr.update(maximum=int(w), value=int(w)),
24
+ gr.update(maximum=int(h)),
25
+ gr.update(maximum=int(h), value=int(h)),
26
+ vc]
27
+
28
+ def _cut_frame(img, input_w1, input_w2, input_h1, input_h2):
29
+ return img[input_h1:input_h2, input_w1:input_w2]
30
+
31
+ def get_nth_frame(vc, n, input_w1, input_w2, input_h1, input_h2):
32
  vc.set(cv.CAP_PROP_POS_FRAMES, n)
33
  ok, img = vc.read()
34
  img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
35
+ return _cut_frame(img, input_w1, input_w2, input_h1, input_h2)
36
 
37
+ def make_dataset(vc, df, input_w1, input_w2, input_h1, input_h2):
38
  path = os.path.dirname(__file__)+"/"+str(uuid.uuid1())
39
  os.mkdir(path)
40
  os.mkdir(path+"/train")
 
51
  if not ok:
52
  break
53
  name = "image_%08d.jpg"%(idx)
54
+ cv.imwrite(path+"/train/"+name, _cut_frame(img, input_w1, input_w2, input_h1, input_h2))
55
  zf.write(path+"/train/"+name, arcname="train/"+name)
56
  os.remove(path+"/train/"+name)
57
  label_df = pd.concat([label_df, pd.DataFrame([[name, label]], columns=headers)])
 
62
 
63
  with gr.Blocks() as demo:
64
  with gr.Tab("Create Image Classify Dataset"):
65
+
66
+ with gr.Accordion('Step 1: Video Info'):
67
+ with gr.Row():
68
+ with gr.Column():
69
+ state_vc = gr.State(value=None)
70
+ input_video = gr.Video()
71
+ with gr.Column():
72
+ output_fps = gr.Number(label="fps")
73
+ output_fms = gr.Number(label="frame count")
74
+ output_w = gr.Number(label="width")
75
+ output_h = gr.Number(label="height")
76
+
77
+ gr.Markdown("*****")
78
+
79
+ with gr.Accordion('Step 2: Frame Info'):
80
+ with gr.Row():
81
+ with gr.Column():
82
+ input_n = gr.Slider(0, 9999, value=0, step=1, label="nth frame")
83
+ input_w1 = gr.Slider(0, 9999, value=0, step=1, label="w1")
84
+ input_w2 = gr.Slider(0, 9999, value=0, step=1, label="w2")
85
+ input_h1 = gr.Slider(0, 9999, value=0, step=1, label="h1")
86
+ input_h2 = gr.Slider(0, 9999, value=0, step=1, label="h2")
87
  btn = gr.Button(value="Submit")
88
+ with gr.Column():
89
+ output_img = gr.Image()
90
 
91
+ input_video.change(
92
  video_change,
93
  inputs=[input_video, state_vc],
94
+ outputs=[output_fps, output_fms, output_w, output_h,
95
+ input_n, input_w1, input_w2, input_h1, input_h2,
96
+ state_vc]
97
+ )
98
+ btn.click(get_nth_frame,
99
+ inputs=[state_vc, input_n, input_w1, input_w2, input_h1, input_h2],
100
+ outputs=output_img)
101
 
102
  gr.Markdown("*****")
103
 
 
104
  with gr.Row():
105
  df = gr.Dataframe(
106
  headers=["start_index", "end_index", "label"],
 
109
  interactive=True,
110
  )
111
  file = gr.File(file_count='multiple')
112
+
113
  btn_make_dataset = gr.Button(value="make dataset")
114
+ btn_make_dataset.click(make_dataset,
115
+ inputs=[state_vc, df, input_w1, input_w2, input_h1, input_h2 ],
116
+ outputs=file)
117
 
118
  gr.Markdown("*****")
119
+
120
  gr.Examples([os.path.join(os.path.dirname(__file__), "test.mp4")], inputs=input_video)
121
 
122