dylanplummer committed on
Commit
bded07f
·
1 Parent(s): 767d5c4

bigger model time

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -5,7 +5,7 @@ import os
5
  import cv2
6
  import uuid
7
  import time
8
- import spaces
9
  import subprocess
10
  import matplotlib
11
  matplotlib.use('Agg')
@@ -26,7 +26,7 @@ from huggingface_hub import HfApi
26
 
27
  plt.style.use('dark_background')
28
 
29
- onnx_file = hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
30
  # model_xml = hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.xml", repo_type="model", token=os.environ['DATASET_SECRET'])
31
  # hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.mapping", repo_type="model", token=os.environ['DATASET_SECRET'])
32
  #model_xml = "model_ir/model.xml"
@@ -51,9 +51,9 @@ def sigmoid(x):
51
  return 1 / (1 + np.exp(-x))
52
 
53
 
54
- @spaces.GPU()
55
  def inference(x, count_only_api, api_key,
56
- img_size=224, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
57
  miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True,
58
  api_call=False,
59
  progress=gr.Progress()):
@@ -138,7 +138,7 @@ def inference(x, count_only_api, api_key,
138
  idx_list.append(i)
139
  if len(batch_list) == batch_size:
140
  batch_X = torch.cat(batch_list)
141
- outputs = ort_sess.run(None, {'frames': batch_X.numpy()})
142
  y1pred = outputs[0]
143
  y2pred = outputs[1]
144
  y3pred = outputs[2]
@@ -162,7 +162,7 @@ def inference(x, count_only_api, api_key,
162
  batch_list.append(batch_list[-1])
163
  idx_list.append(idx_list[-1])
164
  batch_X = torch.cat(batch_list)
165
- outputs = ort_sess.run(None, {'frames': batch_X.numpy()})
166
  y1pred = outputs[0]
167
  y2pred = outputs[1]
168
  y3pred = outputs[2]
 
5
  import cv2
6
  import uuid
7
  import time
8
+ #import spaces
9
  import subprocess
10
  import matplotlib
11
  matplotlib.use('Agg')
 
26
 
27
  plt.style.use('dark_background')
28
 
29
+ onnx_file = hf_hub_download(repo_id="dylanplummer/ropenet", filename="nextjump.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
30
  # model_xml = hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.xml", repo_type="model", token=os.environ['DATASET_SECRET'])
31
  # hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.mapping", repo_type="model", token=os.environ['DATASET_SECRET'])
32
  #model_xml = "model_ir/model.xml"
 
51
  return 1 / (1 + np.exp(-x))
52
 
53
 
54
+ #@spaces.GPU()
55
  def inference(x, count_only_api, api_key,
56
+ img_size=288, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
57
  miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True,
58
  api_call=False,
59
  progress=gr.Progress()):
 
138
  idx_list.append(i)
139
  if len(batch_list) == batch_size:
140
  batch_X = torch.cat(batch_list)
141
+ outputs = ort_sess.run(None, {'video': batch_X.numpy()})
142
  y1pred = outputs[0]
143
  y2pred = outputs[1]
144
  y3pred = outputs[2]
 
162
  batch_list.append(batch_list[-1])
163
  idx_list.append(idx_list[-1])
164
  batch_X = torch.cat(batch_list)
165
+ outputs = ort_sess.run(None, {'video': batch_X.numpy()})
166
  y1pred = outputs[0]
167
  y2pred = outputs[1]
168
  y3pred = outputs[2]