Ubuntu committed on
Commit
67457af
·
1 Parent(s): 2491068
Files changed (2) hide show
  1. .ipynb_checkpoints/app-checkpoint.py +10 -4
  2. app.py +10 -4
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -5,29 +5,35 @@ from encoded_video import EncodedVideo, write_video
5
  import torch
6
  import numpy as np
7
 
8
- def video_identity(video,text):
9
 
10
 
11
  capture = cv2.VideoCapture(video)
12
-
 
 
 
 
 
13
  frameNr = 0
14
  while (True):
15
 
16
  success, frame = capture.read()
17
 
18
  if success:
19
- cv2.imwrite(f'frame_{frameNr}.jpg', frame)
20
 
21
  else:
22
  break
23
 
24
  frameNr = frameNr+10
25
 
26
- img=cv2.imread('frame_0.jpg')
27
 
28
  return img, text
29
  demo = gr.Interface(video_identity,
30
  inputs=[gr.Video(source='upload'),
 
31
  gr.Text()],
32
  outputs=[gr.Image(),
33
  gr.Text()],
 
5
  import torch
6
  import numpy as np
7
 
8
def video_identity(video, user_name, class_name):
    """Extract frames from an uploaded video into ``<user_name>/<class_name>/``.

    Every decoded frame is written as ``frame_<N>.jpg`` where N advances by 10
    per frame (the original counter behavior is preserved).

    Args:
        video: Path to the uploaded video file (as provided by ``gr.Video``).
        user_name: Top-level output directory name.
        class_name: Sub-directory name under ``user_name``.

    Returns:
        tuple: ``(img, status)`` — the first extracted frame as read back by
        ``cv2.imread`` (``None`` if the video produced no frames), and a
        human-readable status string.
    """
    # os.path.join instead of manual '/' concatenation; exist_ok=True replaces
    # the racy exists()/makedirs() pair and creates the user dir implicitly.
    class_d = os.path.join(str(user_name), str(class_name))
    os.makedirs(class_d, exist_ok=True)

    capture = cv2.VideoCapture(video)
    frame_nr = 0
    try:
        while True:
            success, frame = capture.read()
            if not success:
                break
            cv2.imwrite(os.path.join(class_d, f'frame_{frame_nr}.jpg'), frame)
            # NOTE(review): counter jumps by 10 per saved frame, yet every
            # frame is read and written — confirm whether frame skipping
            # was actually intended here.
            frame_nr += 10
    finally:
        # Bug fix: the original never released the capture handle (leak).
        capture.release()

    img = cv2.imread(os.path.join(class_d, 'frame_0.jpg'))
    # Bug fix: the original `return img, text` raised NameError — `text` was
    # dropped from the signature but the return statement was not updated.
    return img, f'Saved frames to {class_d}'
34
  demo = gr.Interface(video_identity,
35
  inputs=[gr.Video(source='upload'),
36
+ gr.Text(),
37
  gr.Text()],
38
  outputs=[gr.Image(),
39
  gr.Text()],
app.py CHANGED
@@ -5,29 +5,35 @@ from encoded_video import EncodedVideo, write_video
5
  import torch
6
  import numpy as np
7
 
8
- def video_identity(video,text):
9
 
10
 
11
  capture = cv2.VideoCapture(video)
12
-
 
 
 
 
 
13
  frameNr = 0
14
  while (True):
15
 
16
  success, frame = capture.read()
17
 
18
  if success:
19
- cv2.imwrite(f'frame_{frameNr}.jpg', frame)
20
 
21
  else:
22
  break
23
 
24
  frameNr = frameNr+10
25
 
26
- img=cv2.imread('frame_0.jpg')
27
 
28
  return img, text
29
  demo = gr.Interface(video_identity,
30
  inputs=[gr.Video(source='upload'),
 
31
  gr.Text()],
32
  outputs=[gr.Image(),
33
  gr.Text()],
 
5
  import torch
6
  import numpy as np
7
 
8
def video_identity(video, user_name, class_name):
    """Extract frames from an uploaded video into ``<user_name>/<class_name>/``.

    Every decoded frame is written as ``frame_<N>.jpg`` where N advances by 10
    per frame (the original counter behavior is preserved).

    Args:
        video: Path to the uploaded video file (as provided by ``gr.Video``).
        user_name: Top-level output directory name.
        class_name: Sub-directory name under ``user_name``.

    Returns:
        tuple: ``(img, status)`` — the first extracted frame as read back by
        ``cv2.imread`` (``None`` if the video produced no frames), and a
        human-readable status string.
    """
    # os.path.join instead of manual '/' concatenation; exist_ok=True replaces
    # the racy exists()/makedirs() pair and creates the user dir implicitly.
    class_d = os.path.join(str(user_name), str(class_name))
    os.makedirs(class_d, exist_ok=True)

    capture = cv2.VideoCapture(video)
    frame_nr = 0
    try:
        while True:
            success, frame = capture.read()
            if not success:
                break
            cv2.imwrite(os.path.join(class_d, f'frame_{frame_nr}.jpg'), frame)
            # NOTE(review): counter jumps by 10 per saved frame, yet every
            # frame is read and written — confirm whether frame skipping
            # was actually intended here.
            frame_nr += 10
    finally:
        # Bug fix: the original never released the capture handle (leak).
        capture.release()

    img = cv2.imread(os.path.join(class_d, 'frame_0.jpg'))
    # Bug fix: the original `return img, text` raised NameError — `text` was
    # dropped from the signature but the return statement was not updated.
    return img, f'Saved frames to {class_d}'
34
  demo = gr.Interface(video_identity,
35
  inputs=[gr.Video(source='upload'),
36
+ gr.Text(),
37
  gr.Text()],
38
  outputs=[gr.Image(),
39
  gr.Text()],