Spaces: Build error
Commit 607956f · fix pose im size
1 Parent(s): b05da2d
Files changed:
- app.py  +1 -1
- inference/pose.py  +2 -2
app.py CHANGED

@@ -13,7 +13,7 @@ def update_model_choices(task):
     model_choices = list(SAPIENS_LITE_MODELS_PATH[task.lower()].keys())
     return gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)
 
-@spaces.GPU()
+@spaces.GPU(duration=12)
 def process_image(input_image, task, version):
     if isinstance(input_image, np.ndarray):
         input_image = Image.fromarray(input_image)
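Note: the only change here is the `duration` argument to the ZeroGPU decorator. On Hugging Face ZeroGPU Spaces, `duration` caps the GPU allocation requested per call, in seconds, and shorter requests are generally scheduled sooner. A minimal sketch of the pattern; the function body below is illustrative, not from this repo:

    import spaces
    import torch

    # `duration` is the GPU time requested per call, in seconds.
    @spaces.GPU(duration=12)
    def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
        # Work inside the decorated function runs on the allocated GPU.
        return x.to("cuda").relu().cpu()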
inference/pose.py CHANGED

@@ -137,6 +137,8 @@ def process_image_or_video(input_data, task='pose', version='sapiens_1b'):
     if model is None or device is None:
         return None
 
+    input_shape = (3, 1024, 768)
+
     def process_frame(frame):
         if isinstance(frame, np.ndarray):
             frame = Image.fromarray(frame)
@@ -144,8 +146,6 @@ def process_image_or_video(input_data, task='pose', version='sapiens_1b'):
         if frame.mode == 'RGBA':
            frame = frame.convert('RGB')
 
-        input_shape = (3, frame.height, frame.width)
-
         img = preprocess_image(frame, input_shape)
 
         with torch.no_grad():
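Note: this change pins the pose input resolution to a fixed 1024x768 instead of deriving it from each frame's dimensions, and hoists the constant out of the per-frame closure, presumably because the Sapiens checkpoints expect a fixed input size (the "pose im size" bug in the commit message). `preprocess_image` itself is not shown in this diff; the sketch below is an assumed equivalent of what it plausibly does with `input_shape`:

    import torch
    import torchvision.transforms.functional as TF
    from PIL import Image

    # Assumed sketch: resize every frame to the fixed (C, H, W) the model
    # expects, then convert to a float tensor scaled to [0, 1].
    def preprocess_image(frame: Image.Image, input_shape=(3, 1024, 768)) -> torch.Tensor:
        _, height, width = input_shape
        frame = frame.resize((width, height), Image.BILINEAR)  # PIL takes (W, H)
        img = TF.pil_to_tensor(frame).float() / 255.0          # (3, H, W)
        return img.unsqueeze(0)                                # (1, 3, H, W) batch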