dylanplummer committed on
Commit
779feb1
·
1 Parent(s): 73b1bf3
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -97,6 +97,7 @@ def inference(stream_url, start_time, end_time, count_only_api, api_key,
97
  seconds = length / fps
98
  all_frames = []
99
  frame_i = 1
 
100
  while cap.isOpened():
101
  ret, frame = cap.read()
102
  if ret is False:
@@ -107,6 +108,7 @@ def inference(stream_url, start_time, end_time, count_only_api, api_key,
107
  all_frames.append(img)
108
  frame_i += 1
109
  cap.release()
 
110
 
111
  length = len(all_frames)
112
  period_lengths = np.zeros(len(all_frames) + seq_len + stride_length)
@@ -123,7 +125,6 @@ def inference(stream_url, start_time, end_time, count_only_api, api_key,
123
  for i in tqdm(range(0, length + stride_length - stride_pad, stride_length)):
124
  batch = all_frames[i:i + seq_len]
125
  Xlist = []
126
- print('Preprocessing...')
127
  for img in batch:
128
  frameTensor = preprocess(img).unsqueeze(0)
129
  Xlist.append(frameTensor)
@@ -136,7 +137,6 @@ def inference(stream_url, start_time, end_time, count_only_api, api_key,
136
  X *= 255
137
  batch_list.append(X.unsqueeze(0))
138
  idx_list.append(i)
139
- print('Running inference...')
140
  if len(batch_list) == batch_size:
141
  batch_X = torch.cat(batch_list)
142
  outputs = ort_sess.run(None, {'video': batch_X.numpy()})
 
97
  seconds = length / fps
98
  all_frames = []
99
  frame_i = 1
100
+ print('Reading frames...')
101
  while cap.isOpened():
102
  ret, frame = cap.read()
103
  if ret is False:
 
108
  all_frames.append(img)
109
  frame_i += 1
110
  cap.release()
111
+ print('Done!')
112
 
113
  length = len(all_frames)
114
  period_lengths = np.zeros(len(all_frames) + seq_len + stride_length)
 
125
  for i in tqdm(range(0, length + stride_length - stride_pad, stride_length)):
126
  batch = all_frames[i:i + seq_len]
127
  Xlist = []
 
128
  for img in batch:
129
  frameTensor = preprocess(img).unsqueeze(0)
130
  Xlist.append(frameTensor)
 
137
  X *= 255
138
  batch_list.append(X.unsqueeze(0))
139
  idx_list.append(i)
 
140
  if len(batch_list) == batch_size:
141
  batch_X = torch.cat(batch_list)
142
  outputs = ort_sess.run(None, {'video': batch_X.numpy()})