dylanplummer committed on
Commit
376fff6
·
1 Parent(s): 044d089

add video upload

Browse files
Files changed (1) hide show
  1. app.py +18 -2
app.py CHANGED
@@ -5,6 +5,7 @@ import os
5
  import cv2
6
  import math
7
  import json
 
8
  import subprocess
9
  import matplotlib
10
  matplotlib.use('Agg')
@@ -36,7 +37,7 @@ os.makedirs(os.path.join(os.getcwd(), 'clips'), exist_ok=True)
36
 
37
  current_model = 'nextjump_speed'
38
  onnx_file = hf_hub_download(repo_id="lumos-motion/nextjump", filename=f"{current_model}.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
39
-
40
 
41
  if torch.cuda.is_available():
42
  print("Using CUDA")
@@ -236,6 +237,18 @@ def detect_relay_beeps(video_path, event_start, relay_length=30, n_jumpers=4, be
236
  return starts, ends
237
 
238
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  def inference(in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay,
240
  count_only_api, api_key, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
241
  miss_threshold=0.8, marks_threshold=0.5, median_pred_filter=True, both_feet=True,
@@ -262,6 +275,7 @@ def inference(in_video, use_60fps, model_choice, beep_detection_on, event_length
262
  ort_sess.run(None, {'video': np.zeros((4, 64, 3, IMG_SIZE, IMG_SIZE), dtype=np.float32)})
263
 
264
  in_video = download_clips(in_video, os.path.join(os.getcwd(), 'clips'), '00:00:00', '', use_60fps=use_60fps)
 
265
  progress(0, desc="Running inference...")
266
  has_access = False
267
  if api_call:
@@ -796,7 +810,7 @@ with gr.Blocks() as demo:
796
  demo_inference = partial(inference, count_only_api=False, api_key=None)
797
 
798
  run_button.click(demo_inference, [in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay],
799
- outputs=[out_text, out_plot, out_phase_spiral, out_phase, out_hist, out_event_type_dist])
800
  api_inference = partial(inference, api_call=True)
801
  api_dummy_button.click(api_inference, [in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay, count_only, api_token],
802
  outputs=[period_length], api_name='inference')
@@ -810,6 +824,8 @@ with gr.Blocks() as demo:
810
  inputs=[in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay],
811
  outputs=[out_text, out_plot, out_phase_spiral, out_phase, out_hist, out_event_type_dist],
812
  fn=demo_inference, cache_examples=False)
 
 
813
 
814
 
815
  if __name__ == "__main__":
 
5
  import cv2
6
  import math
7
  import json
8
+ import time
9
  import subprocess
10
  import matplotlib
11
  matplotlib.use('Agg')
 
37
 
38
  current_model = 'nextjump_speed'
39
  onnx_file = hf_hub_download(repo_id="lumos-motion/nextjump", filename=f"{current_model}.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
40
+ api = HfApi()
41
 
42
  if torch.cuda.is_available():
43
  print("Using CUDA")
 
237
  return starts, ends
238
 
239
 
240
+ def upload_video(out_text, in_video):
241
+ if out_text != '':
242
+ # generate a timestamp name for the video
243
+ upload_path = f"{int(time.time())}.mp4"
244
+ api.upload_file(
245
+ path_or_fileobj=in_video,
246
+ path_in_repo=upload_path,
247
+ repo_id="dylanplummer/single-rope-contest",
248
+ repo_type="dataset",
249
+ )
250
+
251
+
252
  def inference(in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay,
253
  count_only_api, api_key, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
254
  miss_threshold=0.8, marks_threshold=0.5, median_pred_filter=True, both_feet=True,
 
275
  ort_sess.run(None, {'video': np.zeros((4, 64, 3, IMG_SIZE, IMG_SIZE), dtype=np.float32)})
276
 
277
  in_video = download_clips(in_video, os.path.join(os.getcwd(), 'clips'), '00:00:00', '', use_60fps=use_60fps)
278
+
279
  progress(0, desc="Running inference...")
280
  has_access = False
281
  if api_call:
 
810
  demo_inference = partial(inference, count_only_api=False, api_key=None)
811
 
812
  run_button.click(demo_inference, [in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay],
813
+ outputs=[out_text, out_plot, out_phase_spiral, out_phase, out_hist, out_event_type_dist]).then(upload_video, inputs=[out_text, in_video])
814
  api_inference = partial(inference, api_call=True)
815
  api_dummy_button.click(api_inference, [in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay, count_only, api_token],
816
  outputs=[period_length], api_name='inference')
 
824
  inputs=[in_video, use_60fps, model_choice, beep_detection_on, event_length, relay_detection_on, relay_length, switch_delay],
825
  outputs=[out_text, out_plot, out_phase_spiral, out_phase, out_hist, out_event_type_dist],
826
  fn=demo_inference, cache_examples=False)
827
+
828
+
829
 
830
 
831
  if __name__ == "__main__":