# Hugging Face Spaces status banner captured during scraping: "Spaces: Sleeping"
| import os | |
| import uuid | |
| import cv2 as cv | |
| import pandas as pd | |
| from zipfile import ZipFile | |
| import gradio as gr | |
def video_change(v, vc):
    """Open the newly selected video and report its basic properties.

    Parameters
    ----------
    v : str or None
        Path of the uploaded video file (None when the user clears the
        video component).
    vc : cv.VideoCapture or None
        Previously opened capture kept in gradio State; released here so
        OS file handles do not leak when the user swaps videos.

    Returns
    -------
    list
        Ten elements matching the bound outputs: fps, frame count,
        width, height, five slider updates (re-ranged for the new
        video), and the new VideoCapture for the State component.
    """
    # Free the previous capture's file handle before opening a new one.
    if vc:
        vc.release()
    # Video was cleared: nothing to open. cv.VideoCapture(None) would
    # raise, so reset the numbers and leave the sliders unchanged.
    if not v:
        return [0, 0, 0, 0,
                gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
                None]
    vc = cv.VideoCapture(v)
    fps = vc.get(cv.CAP_PROP_FPS)
    fms = vc.get(cv.CAP_PROP_FRAME_COUNT)
    w = vc.get(cv.CAP_PROP_FRAME_WIDTH)
    h = vc.get(cv.CAP_PROP_FRAME_HEIGHT)
    return [fps, fms, w, h,
            # Frame-index slider: valid positions are 0 .. frame_count-1.
            gr.update(maximum=int(fms) - 1),
            # Crop sliders: w1/h1 keep their value; w2/h2 snap to the
            # full frame so the default crop is the whole image.
            gr.update(maximum=int(w)),
            gr.update(maximum=int(w), value=int(w)),
            gr.update(maximum=int(h)),
            gr.update(maximum=int(h), value=int(h)),
            vc]
| def _cut_frame(img, input_w1, input_w2, input_h1, input_h2): | |
| return img[input_h1:input_h2, input_w1:input_w2] | |
def get_nth_frame(vc, n, input_w1, input_w2, input_h1, input_h2):
    """Seek to frame *n*, convert it to RGB, and return the cropped frame.

    Returns None when no capture is loaded or the frame cannot be
    decoded; gradio renders None as an empty image instead of crashing.
    """
    # Button may be clicked before any video has been uploaded.
    if vc is None:
        return None
    vc.set(cv.CAP_PROP_POS_FRAMES, n)
    ok, img = vc.read()
    # read() returns (False, None) past end-of-stream or on a decode
    # failure; the unguarded cvtColor would raise on that path.
    if not ok or img is None:
        return None
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)  # OpenCV decodes frames as BGR
    return _cut_frame(img, input_w1, input_w2, input_h1, input_h2)
def make_dataset(vc, df, input_w1, input_w2, input_h1, input_h2):
    """Export labelled frame ranges as a training zip plus a label CSV.

    Parameters
    ----------
    vc : cv.VideoCapture
        Open capture for the uploaded video.
    df : pandas.DataFrame
        Rows of (start_index, end_index, label); frames in
        [start_index, end_index) are extracted with the given label.
    input_w1, input_w2, input_h1, input_h2 : int
        Crop rectangle applied to every frame.

    Returns
    -------
    list[str]
        [label.csv path, train.zip path] for the gr.File output.
    """
    # Unique working directory next to this script for each export.
    path = os.path.join(os.path.dirname(__file__), str(uuid.uuid1()))
    train_dir = os.path.join(path, "train")
    os.makedirs(train_dir)
    headers = ["name", "label"]
    rows = []  # collected once; pd.concat per frame would be quadratic
    with ZipFile(os.path.join(path, "train.zip"), "w") as zf:
        for line in df.itertuples():
            start_idx = int(line[1])
            end_idx = int(line[2])
            label = int(line[3])
            vc.set(cv.CAP_PROP_POS_FRAMES, start_idx)
            for idx in range(start_idx, end_idx):
                ok, img = vc.read()
                if not ok:
                    break  # range ran past the end of the video
                name = "image_%08d.jpg" % (idx)
                frame_path = os.path.join(train_dir, name)
                # imwrite expects BGR, which is exactly what read() yields.
                cv.imwrite(frame_path, _cut_frame(img, input_w1, input_w2, input_h1, input_h2))
                zf.write(frame_path, arcname="train/" + name)
                os.remove(frame_path)  # keep only the zipped copy on disk
                rows.append([name, label])
    label_df = pd.DataFrame(rows, columns=headers)
    label_path = os.path.join(path, "label.csv")
    label_df.to_csv(label_path, index=False)
    return [label_path, os.path.join(path, "train.zip")]
# UI definition: three-step workflow for turning a video into an
# image-classification dataset (inspect video -> preview crop -> export).
with gr.Blocks() as demo:
    with gr.Tab("Create Image Classify Dataset"):
        # Step 1: upload a video; its properties appear in the right column.
        with gr.Accordion('Step 1: Video Info'):
            with gr.Row():
                with gr.Column():
                    # Holds the open cv.VideoCapture between events.
                    state_vc = gr.State(value=None)
                    input_video = gr.Video()
                with gr.Column():
                    output_fps = gr.Number(label="fps")
                    output_fms = gr.Number(label="frame count")
                    output_w = gr.Number(label="width")
                    output_h = gr.Number(label="height")
        gr.Markdown("*****")
        # Step 2: pick a frame index and crop rectangle, preview the result.
        with gr.Accordion('Step 2: Frame Info'):
            with gr.Row():
                with gr.Column():
                    # The 9999 maxima are placeholders; video_change()
                    # re-ranges the sliders once a video is loaded.
                    input_n = gr.Slider(0, 9999, value=0, step=1, label="nth frame")
                    input_w1 = gr.Slider(0, 9999, value=0, step=1, label="w1")
                    input_w2 = gr.Slider(0, 9999, value=0, step=1, label="w2")
                    input_h1 = gr.Slider(0, 9999, value=0, step=1, label="h1")
                    input_h2 = gr.Slider(0, 9999, value=0, step=1, label="h2")
                    btn = gr.Button(value="Submit")
                with gr.Column():
                    output_img = gr.Image()
        # Refresh video properties and slider ranges when the file changes.
        input_video.change(
            video_change,
            inputs=[input_video, state_vc],
            outputs=[output_fps, output_fms, output_w, output_h,
                     input_n, input_w1, input_w2, input_h1, input_h2,
                     state_vc]
        )
        # Preview the selected frame with the current crop applied.
        btn.click(get_nth_frame,
                  inputs=[state_vc, input_n, input_w1, input_w2, input_h1, input_h2],
                  outputs=output_img)
        gr.Markdown("*****")
        # Step 3: enter (start_index, end_index, label) rows, then export.
        with gr.Row():
            df = gr.Dataframe(
                headers=["start_index", "end_index", "label"],
                datatype=["number", "number", "number"],
                height=500,
                interactive=True,
            )
            file = gr.File(file_count='multiple')
        btn_make_dataset = gr.Button(value="make dataset")
        btn_make_dataset.click(make_dataset,
                               inputs=[state_vc, df, input_w1, input_w2, input_h1, input_h2],
                               outputs=file)
        gr.Markdown("*****")
        # Bundled sample video for a one-click demo.
        gr.Examples([os.path.join(os.path.dirname(__file__), "test.mp4")], inputs=input_video)

if __name__ == "__main__":
    # queue() enables request queuing so long-running exports don't time out.
    demo.queue().launch(debug=True)