Spaces: Runtime error

app.py CHANGED
@@ -1,5 +1,7 @@
 import gradio as gr
 import numpy as np
+import cv2
+from PIL import Image
 from PIFuHD.data import EvalWMetaDataset
 from PIFuHD.data.ImageBundle import ImageBundle
 from PIFuHD.options import BaseOptions
@@ -9,9 +11,7 @@ from human_pose_estimator import PoseEstimator
 from estimator import rect

 REPO_ID = "cxeep/PIFuHD"
-
 pose_estimator = PoseEstimator("cpu")
-
 checkpoint_path = hf_hub_download(repo_id=REPO_ID, filename="pifuhd.pt")

 cmd = [
@@ -23,51 +23,74 @@ cmd = [
     '--start_id', '-1',
     '--end_id', '-1'
 ]
-
 parser = BaseOptions()
 opts = parser.parse(cmd)
 reconstructor = Reconstructor(opts)

-
 def make_bundle(image, name):
     image, rects = rect(pose_estimator, image)
     return ImageBundle(img=image, name=name, meta=rects)

-[six removed lines; their contents are not rendered in this view]
+def process_video(video_path):
+    frames = []
+    cap = cv2.VideoCapture(video_path)
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frames.append(Image.fromarray(frame))
+    cap.release()
+
+    models = []
+    for frame in frames:
+        bundle = make_bundle(np.array(frame), "Model3D")
+        dataset = EvalWMetaDataset(opts, [bundle])
+        model = reconstructor.evaluate(dataset)
+        models.append(model)
+
+    # TODO: Combine models into animation
+    output_animation = models[0]  # Placeholder, replace with actual animation
+
+    return output_animation
+
+def predict(input):
+    if isinstance(input, str):  # video
+        return process_video(input)
+    else:  # image
+        bundle = make_bundle(input, "Model3D")
+        dataset = EvalWMetaDataset(opts, [bundle])
+        return reconstructor.evaluate(dataset)

 footer = r"""
 <center>
-    <b>
-    3D Human Digitization
-    </b>
+    <b>Demo for <a href='https://github.com/facebookresearch/pifuhd'>PIFuHD</a></b>
 </center>
 """

 with gr.Blocks(title="PIFuHD") as app:
     gr.HTML("<center><h1>3D Human Digitization</h1></center>")
+    gr.HTML("<center><h3>PIFuHD: Multi-Level Pixel-Aligned Implicit Function for High-Resolution 3D Human Digitization (CVPR 2020)</h3></center>")
+
     with gr.Row(equal_height=False):
         with gr.Column():
             input_img = gr.Image(type="numpy", label="Input image")
+            input_video = gr.Video(type="filepath", label="Input Video")
             run_btn = gr.Button(variant="primary")
         with gr.Column():
             output_obj = gr.Model3D(label="Output model")
             output_img = gr.Image(type="filepath", label="Output image")
-            gr.ClearButton(components=[input_img, output_img, output_obj], variant="stop")
-
-    run_btn.click(predict, [input_img], [output_img, output_obj])
-
+            gr.ClearButton(components=[input_img, input_video, output_img, output_obj], variant="stop")
+
+    run_btn.click(predict, [input_img, input_video], [output_img, output_obj])
+
     with gr.Row():
         blobs = [[f"examples/{x:02d}.png"] for x in range(1, 4)]
         examples = gr.Dataset(components=[input_img], samples=blobs)
         examples.click(lambda x: x[0], [examples], [input_img])
-
+
     with gr.Row():
         gr.HTML(footer)

 app.launch(share=False, debug=True, show_error=True)
-app.queue()
+app.queue()