InvincibleMeta Balaji23 committed on
Commit
f3cd9c3
·
verified ·
1 Parent(s): 554d3e4

Changes to cuda requirement for stateless GPU (#4)

Browse files

- Changes to cuda requirement for stateless GPU (fab4006624bbd0ef4355fb416ac7f2797315394d)


Co-authored-by: rao <Balaji23@users.noreply.huggingface.co>

preprocess/humanparsing/run_parsing.py CHANGED
@@ -1,29 +1,29 @@
# Human-parsing inference wrapper (pre-change version): pins a CUDA device
# via torch, then runs two ONNX Runtime sessions on the CPU provider.
import pdb

from pathlib import Path
import sys
import os

import onnxruntime as ort

# Make the sibling parsing_api module importable regardless of CWD.
PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
sys.path.insert(0, str(PROJECT_ROOT))

from parsing_api import onnx_inference
import torch


class Parsing:
    """Loads the ATR and LIP human-parsing ONNX models and runs inference."""

    def __init__(self, gpu_id: int):
        """Select the CUDA device and build both InferenceSessions.

        Args:
            gpu_id: CUDA device index passed to torch.cuda.set_device and
                recorded as an ORT session config entry.
        """
        self.gpu_id = gpu_id
        torch.cuda.set_device(gpu_id)

        opts = ort.SessionOptions()
        opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        opts.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        opts.add_session_config_entry('gpu_id', str(gpu_id))

        # Checkpoints live two directories above this file.
        ckpt_root = Path(__file__).absolute().parents[2].absolute()
        self.session = ort.InferenceSession(
            os.path.join(ckpt_root, 'ckpt/humanparsing/parsing_atr.onnx'),
            sess_options=opts,
            providers=['CPUExecutionProvider'],
        )
        self.lip_session = ort.InferenceSession(
            os.path.join(ckpt_root, 'ckpt/humanparsing/parsing_lip.onnx'),
            sess_options=opts,
            providers=['CPUExecutionProvider'],
        )

    def __call__(self, input_image):
        """Run parsing on *input_image*; returns (parsed_image, face_mask)."""
        # torch.cuda.set_device(self.gpu_id)
        parsed_image, face_mask = onnx_inference(self.session, self.lip_session, input_image)
        return parsed_image, face_mask
 
# Human-parsing inference wrapper for stateless (CUDA-free) hosts: all
# inference runs on the ONNX Runtime CPU execution provider.
import pdb  # NOTE(review): debug-only import, unused here — confirm before dropping

from pathlib import Path
import sys
import os

import onnxruntime as ort

# Make the sibling parsing_api module importable regardless of CWD.
PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
sys.path.insert(0, str(PROJECT_ROOT))

from parsing_api import onnx_inference
import torch  # NOTE(review): unused since CUDA pinning was removed — confirm before dropping


class Parsing:
    """Loads the ATR and LIP human-parsing ONNX models and runs inference.

    Runs entirely on CPUExecutionProvider so it works on stateless GPU /
    CUDA-free hosts; no CUDA device is ever selected.
    """

    def __init__(self, gpu_id: int):
        """Build both InferenceSessions.

        Args:
            gpu_id: kept for backward compatibility with callers; it is only
                recorded as an ORT session config entry — no CUDA device is
                selected (inference uses CPUExecutionProvider).
        """
        session_options = ort.SessionOptions()
        session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        session_options.add_session_config_entry('gpu_id', str(gpu_id))

        # Checkpoints live two directories above this file; compute once
        # instead of duplicating the expression per session.
        ckpt_root = Path(__file__).absolute().parents[2]
        self.session = ort.InferenceSession(
            os.path.join(ckpt_root, 'ckpt/humanparsing/parsing_atr.onnx'),
            sess_options=session_options,
            providers=['CPUExecutionProvider'],
        )
        self.lip_session = ort.InferenceSession(
            os.path.join(ckpt_root, 'ckpt/humanparsing/parsing_lip.onnx'),
            sess_options=session_options,
            providers=['CPUExecutionProvider'],
        )

    def __call__(self, input_image):
        """Run parsing on *input_image*.

        Returns:
            tuple: (parsed_image, face_mask) as produced by onnx_inference.
        """
        parsed_image, face_mask = onnx_inference(self.session, self.lip_session, input_image)
        return parsed_image, face_mask