dylanplummer committed on
Commit
ab0b18f
·
1 Parent(s): 239d56c

add more examples

Browse files
Files changed (1) hide show
  1. app.py +14 -17
app.py CHANGED
@@ -35,12 +35,6 @@ model_ir = ie.read_model(model=model_xml)
35
  config = {"PERFORMANCE_HINT": "LATENCY"}
36
  compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU", config=config)
37
 
38
- a = os.path.join(os.path.dirname(__file__), "files", "dylan.mp4")
39
- b = os.path.join(os.path.dirname(__file__), "files", "train14.mp4")
40
- c = os.path.join(os.path.dirname(__file__), "files", "train_17.mp4")
41
- d = os.path.join(os.path.dirname(__file__), "files", "train13.mp4")
42
- e = os.path.join(os.path.dirname(__file__), "files", "train_213.mp4")
43
-
44
 
45
  class SquarePad:
46
  # https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9
@@ -243,9 +237,10 @@ def inference(x, count_only_api, api_key, img_size=192, seq_len=64, stride_lengt
243
  'miss': misses,
244
  'frame_type': frame_type,
245
  'jumps': full_marks,
246
- 'jumps_size': (full_marks + 0.1) * 8,
247
  'miss_size': np.clip((1 - periodicity) * 0.9 + 0.1, 1, 10),
248
  'seconds': np.linspace(0, seconds, num=len(periodLength))})
 
249
  fig = px.scatter(data_frame=df,
250
  x='seconds',
251
  y='jumps per second',
@@ -261,6 +256,7 @@ def inference(x, count_only_api, api_key, img_size=192, seq_len=64, stride_lengt
261
  trendline_color_override="goldenrod",
262
  trendline_scope='overall',
263
  template="plotly_dark")
 
264
  fig.update_layout(legend=dict(
265
  orientation="h",
266
  yanchor="bottom",
@@ -332,30 +328,31 @@ with gr.Blocks(theme='WeixuanYuan/Soft_dark') as demo:
332
 
333
  with gr.Accordion(label="Instructions and more information", open=False):
334
  instructions = "## Instructions:"
335
- instructions += "\n* Upload a video and click 'Run' to get a prediction of the number of jumps (either one foot, or both). This could take a couple minutes! The model is trained on single rope and double dutch speed, but try out any videos you want."
336
- instructions += "\n* If you know the true count, you can provide it and your video will be used later to improve the model."
337
  instructions += "\n\n## Tips (optional):"
338
  instructions += "\n* Trim the video to start and end of the event"
339
  instructions += "\n* Frame the jumper fully, in the center of the frame"
340
  instructions += "\n* Videos are automatically resized, so higher resolution will not help, but a closer framing of the jumper might help. Try cropping the video differently."
341
- instructions += "\n\n\nUnfortunately due to inference costs, right now we have to deploy a slower, less accurate version of the model but it still works surprisingly well in most cases. If you run into any issues let us know. If you would like to contribute to the project, please reach out!"
342
  gr.Markdown(instructions)
343
 
344
  faq = "## FAQ:"
345
  faq += "\n* **Q:** Does the model recognize misses?\n * **A:** Yes, but if it fails, you can try tuning the miss threshold slider to make it more sensitive."
346
  faq += "\n* **Q:** Does the model recognize double dutch?\n * **A:** Yes, but it is trained on a smaller set of double dutch videos, so it may not work perfectly."
347
- faq += "\n* **Q:** Does the model recognize double unders\n * **A:** Yes, but it is trained on a smaller set of double under videos, so it may not work perfectly. It is also trained to count the rope, not the jumps so you will need to divide the count by 2."
348
- faq += "\n* **Q:** Does the model count both feet?\n * **A:** Yes, but for convention we usually return the halved (one foot) score."
349
  gr.Markdown(faq)
350
 
351
  demo_inference = partial(inference, count_only_api=False, api_key=None)
352
 
353
  gr.Examples(examples=[
354
- [a],
355
- [b],
356
- [c],
357
- [d],
358
- [e]
 
 
 
359
  ],
360
  inputs=[in_video],
361
  outputs=[out_text, out_plot, out_hist, out_event_type_dist],
 
35
  config = {"PERFORMANCE_HINT": "LATENCY"}
36
  compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU", config=config)
37
 
 
 
 
 
 
 
38
 
39
  class SquarePad:
40
  # https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9
 
237
  'miss': misses,
238
  'frame_type': frame_type,
239
  'jumps': full_marks,
240
+ 'jumps_size': (full_marks + 0.05) * 10,
241
  'miss_size': np.clip((1 - periodicity) * 0.9 + 0.1, 1, 10),
242
  'seconds': np.linspace(0, seconds, num=len(periodLength))})
243
+
244
  fig = px.scatter(data_frame=df,
245
  x='seconds',
246
  y='jumps per second',
 
256
  trendline_color_override="goldenrod",
257
  trendline_scope='overall',
258
  template="plotly_dark")
259
+
260
  fig.update_layout(legend=dict(
261
  orientation="h",
262
  yanchor="bottom",
 
328
 
329
  with gr.Accordion(label="Instructions and more information", open=False):
330
  instructions = "## Instructions:"
331
+ instructions += "\n* Upload a video and click 'Run' to get a prediction of the number of jumps (either one foot, or both). This could take a couple minutes!"
 
332
  instructions += "\n\n## Tips (optional):"
333
  instructions += "\n* Trim the video to start and end of the event"
334
  instructions += "\n* Frame the jumper fully, in the center of the frame"
335
  instructions += "\n* Videos are automatically resized, so higher resolution will not help, but a closer framing of the jumper might help. Try cropping the video differently."
 
336
  gr.Markdown(instructions)
337
 
338
  faq = "## FAQ:"
339
  faq += "\n* **Q:** Does the model recognize misses?\n * **A:** Yes, but if it fails, you can try tuning the miss threshold slider to make it more sensitive."
340
  faq += "\n* **Q:** Does the model recognize double dutch?\n * **A:** Yes, but it is trained on a smaller set of double dutch videos, so it may not work perfectly."
341
+ faq += "\n* **Q:** Does the model recognize double unders\n * **A:** Yes, but it is trained on a smaller set of double under videos, so it may not work perfectly. It is also trained to count the rope, not the jumps so you will need to divide the count by 2 to get the traditional double under count."
342
+ faq += "\n* **Q:** Does the model count both feet?\n * **A:** Yes, it counts every time the rope goes around no matter the event."
343
  gr.Markdown(faq)
344
 
345
  demo_inference = partial(inference, count_only_api=False, api_key=None)
346
 
347
  gr.Examples(examples=[
348
+ [os.path.join(os.path.dirname(__file__), "files", "dylan.mp4")],
349
+ [os.path.join(os.path.dirname(__file__), "files", "train14.mp4")],
350
+ [os.path.join(os.path.dirname(__file__), "files", "train_17.mp4")],
351
+ [os.path.join(os.path.dirname(__file__), "files", "train13.mp4")],
352
+ [os.path.join(os.path.dirname(__file__), "files", "train_213.mp4")],
353
+ [os.path.join(os.path.dirname(__file__), "files", "train_156.mp4")],
354
+ [os.path.join(os.path.dirname(__file__), "files", "train_202.mp4")],
355
+ [os.path.join(os.path.dirname(__file__), "files", "train_57.mp4")],
356
  ],
357
  inputs=[in_video],
358
  outputs=[out_text, out_plot, out_hist, out_event_type_dist],