dylanplummer committed on
Commit
741c26c
·
1 Parent(s): c134ff4

move cuda init

Browse files
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -36,15 +36,6 @@ onnx_file = hf_hub_download(repo_id="dylanplummer/ropenet", filename="model.onnx
36
  # config = {"PERFORMANCE_HINT": "LATENCY"}
37
  # compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU", config=config)
38
 
39
- # check if GPU is available
40
- if torch.cuda.is_available():
41
- providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
42
- "user_compute_stream": str(torch.cuda.current_stream().cuda_stream)})]
43
- sess_options = ort.SessionOptions()
44
- ort_sess = ort.InferenceSession(onnx_file, sess_options=sess_options, providers=providers)
45
- else:
46
- ort_sess = ort.InferenceSession(onnx_file)
47
-
48
 
49
  class SquarePad:
50
  # https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9
@@ -63,6 +54,14 @@ def sigmoid(x):
63
  @spaces.GPU(duration=180)
64
  def inference(x, count_only_api, api_key, img_size=224, seq_len=64, stride_length=32, stride_pad=3, batch_size=4, miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True, api_call=False):
65
  print(x)
 
 
 
 
 
 
 
 
66
  #api = HfApi(token=os.environ['DATASET_SECRET'])
67
  #out_file = str(uuid.uuid1())
68
  has_access = False
 
36
  # config = {"PERFORMANCE_HINT": "LATENCY"}
37
  # compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU", config=config)
38
 
 
 
 
 
 
 
 
 
 
39
 
40
  class SquarePad:
41
  # https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9
 
54
  @spaces.GPU(duration=180)
55
  def inference(x, count_only_api, api_key, img_size=224, seq_len=64, stride_length=32, stride_pad=3, batch_size=4, miss_threshold=0.8, marks_threshold=0.6, median_pred_filter=True, center_crop=True, both_feet=True, api_call=False):
56
  print(x)
57
+ # check if GPU is available
58
+ if torch.cuda.is_available():
59
+ providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
60
+ "user_compute_stream": str(torch.cuda.current_stream().cuda_stream)})]
61
+ sess_options = ort.SessionOptions()
62
+ ort_sess = ort.InferenceSession(onnx_file, sess_options=sess_options, providers=providers)
63
+ else:
64
+ ort_sess = ort.InferenceSession(onnx_file)
65
  #api = HfApi(token=os.environ['DATASET_SECRET'])
66
  #out_file = str(uuid.uuid1())
67
  has_access = False