dylanplummer committed on
Commit
87f81d9
·
1 Parent(s): 64a0ea8

testing bicubic

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -5,7 +5,7 @@ import os
5
  import cv2
6
  import uuid
7
  import time
8
- import spaces
9
  import subprocess
10
  import matplotlib
11
  matplotlib.use('Agg')
@@ -51,7 +51,7 @@ def sigmoid(x):
51
  return 1 / (1 + np.exp(-x))
52
 
53
 
54
- @spaces.GPU()
55
  def inference(x, count_only_api, api_key,
56
  img_size=288, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
57
  miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True,
@@ -108,6 +108,7 @@ def inference(x, count_only_api, api_key,
108
  for i in tqdm(range(0, length + stride_length - stride_pad, stride_length)):
109
  batch = all_frames[i:i + seq_len]
110
  Xlist = []
 
111
  for img in batch:
112
  transforms_list = []
113
  # if center_crop:
@@ -118,7 +119,7 @@ def inference(x, count_only_api, api_key,
118
  # transforms_list.append(transforms.CenterCrop((img_size, img_size)))
119
  # else:
120
  transforms_list.append(SquarePad())
121
- transforms_list.append(transforms.Resize((img_size, img_size)))
122
 
123
 
124
  transforms_list += [
@@ -136,6 +137,7 @@ def inference(x, count_only_api, api_key,
136
  X *= 255
137
  batch_list.append(X.unsqueeze(0))
138
  idx_list.append(i)
 
139
  if len(batch_list) == batch_size:
140
  batch_X = torch.cat(batch_list)
141
  outputs = ort_sess.run(None, {'video': batch_X.numpy()})
 
5
  import cv2
6
  import uuid
7
  import time
8
+ #import spaces
9
  import subprocess
10
  import matplotlib
11
  matplotlib.use('Agg')
 
51
  return 1 / (1 + np.exp(-x))
52
 
53
 
54
+ #@spaces.GPU()
55
  def inference(x, count_only_api, api_key,
56
  img_size=288, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
57
  miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True,
 
108
  for i in tqdm(range(0, length + stride_length - stride_pad, stride_length)):
109
  batch = all_frames[i:i + seq_len]
110
  Xlist = []
111
+ print('Preprocessing...')
112
  for img in batch:
113
  transforms_list = []
114
  # if center_crop:
 
119
  # transforms_list.append(transforms.CenterCrop((img_size, img_size)))
120
  # else:
121
  transforms_list.append(SquarePad())
122
+ transforms_list.append(transforms.Resize((img_size, img_size)), interpolation=Image.BICUBIC)
123
 
124
 
125
  transforms_list += [
 
137
  X *= 255
138
  batch_list.append(X.unsqueeze(0))
139
  idx_list.append(i)
140
+ print('Running inference...')
141
  if len(batch_list) == batch_size:
142
  batch_X = torch.cat(batch_list)
143
  outputs = ort_sess.run(None, {'video': batch_X.numpy()})