File size: 3,430 Bytes
5833bb7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38ace0a
5833bb7
 
38ace0a
5833bb7
 
 
 
8b508ea
 
 
 
 
 
5833bb7
 
 
2c29434
a2aaf2e
 
 
 
 
 
 
5833bb7
 
 
 
 
 
8273e3e
5833bb7
 
 
d28b1b0
5833bb7
 
 
 
 
 
 
 
 
 
3bfe529
 
38ace0a
5833bb7
 
 
 
 
 
 
 
38ace0a
 
 
5833bb7
 
38ace0a
 
 
5833bb7
3bfe529
 
 
 
 
 
5833bb7
38ace0a
 
 
3bfe529
38ace0a
5833bb7
 
 
 
38ace0a
 
 
3bfe529
38ace0a
 
5833bb7
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import gradio as gr
import cv2
import matplotlib
import numpy as np
import os
from PIL import Image
import spaces
import torch
import tempfile
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download

from ppd.utils.set_seed import set_seed
from ppd.models.ppd import PixelPerfectDepth

# Custom CSS injected into the Blocks app: cap the input/output displays at
# the viewport height, size the download button, and make the slider's depth
# image letterbox (object-fit: contain) inside its container.
css = """
#img-display-container {
    max-height: 100vh;
}
#img-display-input {
    max-height: 100vh;
}
#img-display-output {
    max-height: 100vh;
}
#download {
    height: 62px;
}

#img-display-output .image-slider-image {
    object-fit: contain !important;
    width: 100% !important;
    height: 100% !important;
}
"""

# Pick GPU when available; the demo still runs (slowly) on CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# 4 sampling steps -- presumably a speed/quality trade-off for interactivity.
model = PixelPerfectDepth(sampling_steps=4)
# Download the pretrained checkpoint from the Hugging Face Hub (cached locally).
ckpt_path = hf_hub_download(
    repo_id="gangweix/Pixel-Perfect-Depth",
    filename="ppd.pth",
    repo_type="model"
)
state_dict = torch.load(ckpt_path, map_location="cpu")
# NOTE(review): strict=False silently ignores missing/unexpected keys --
# confirm the checkpoint actually matches this model definition.
model.load_state_dict(state_dict, strict=False)
model = model.to(DEVICE).eval()



# Markdown rendered at the top of the demo page.
title = "# Pixel-Perfect Depth"
# NOTE(review): the [paper]() link below is empty -- fill in the paper URL.
description = """Official demo for **Pixel-Perfect Depth**.
Please refer to our [paper](), [project page](https://pixel-perfect-depth.github.io), and [github](https://github.com/gangweix/pixel-perfect-depth) for more details."""

@spaces.GPU
def predict_depth(image):
    """Run the model on one image array and return its depth map.

    Decorated with @spaces.GPU so ZeroGPU allocates a GPU for the call.
    The caller passes channel-reversed input, so `image` is presumably
    BGR here -- confirm against PixelPerfectDepth.infer_image.
    """
    return model.infer_image(image)

# --- UI layout: header, input/output row, action button, file outputs ---
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Depth Prediction demo")

    with gr.Row():
        # Input arrives as an HxWx3 numpy array (type='numpy').
        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
        # Before/after slider showing the original next to the colorized depth.
        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
    submit = gr.Button(value="Predict Depth")
    
    # Downloadable artifacts produced by on_submit below.
    concat_file = gr.File(label="Concatenated visualization (image+depth)", elem_id="image-depth-download")
    raw_file = gr.File(label="Raw depth output (saved as .npy)", elem_id="download",)

    # Colormap used to turn the scalar depth map into an RGB visualization.
    cmap = matplotlib.colormaps.get_cmap('Spectral')
    def on_submit(image):
        """Predict depth for one uploaded frame and build all three outputs.

        Args:
            image: HxWx3 uint8 RGB numpy array from the gr.Image input.

        Returns:
            [(original_rgb, colored_depth), concat_png_path, raw_npy_path] --
            the slider pair plus the two downloadable file paths.
        """
        original_image = image.copy()

        # Channel flip RGB -> BGR before inference (matches predict_depth's
        # presumed OpenCV-order input).
        depth = predict_depth(image[:, :, ::-1])

        # Persist raw depth as .npy. Close the temp handle before writing so
        # the path is safely reusable (np.save reopening an already-open file
        # fails on Windows) and the descriptor isn't leaked.
        with tempfile.NamedTemporaryFile(suffix='.npy', delete=False) as f_raw:
            raw_path = f_raw.name
        np.save(raw_path, depth)

        # Normalize to [0, 255]. Guard against a constant depth map, which
        # would otherwise divide by zero and yield an all-NaN visualization.
        d_min, d_max = depth.min(), depth.max()
        d_range = d_max - d_min
        if d_range == 0:
            d_range = 1.0
        depth_vis = ((depth - d_min) / d_range * 255.0).astype(np.uint8)
        colored_depth = (cmap(depth_vis)[:, :, :3] * 255).astype(np.uint8)

        # Side-by-side PNG: input | 50px white gutter | colorized depth
        # (both flipped to BGR for cv2.imwrite).
        split_region = np.ones((image.shape[0], 50, 3), dtype=np.uint8) * 255
        combined_result = cv2.hconcat([image[:, :, ::-1], split_region, colored_depth[:, :, ::-1]])
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f_png:
            concat_path = f_png.name
        cv2.imwrite(concat_path, combined_result)

        return [(original_image, colored_depth), concat_path, raw_path]

    # Wire the button: one prediction feeds the slider and both file outputs.
    submit.click(
        on_submit, 
        inputs=[input_image], 
        outputs=[depth_image_slider, concat_file, raw_file]
    )

    # Populate clickable examples from the bundled assets directory
    # (sorted for a stable display order).
    example_files = os.listdir('assets/examples')
    example_files.sort()
    example_files = [os.path.join('assets/examples', filename) for filename in example_files]
    # NOTE(review): no cache_examples here, so fn runs live on each click.
    examples = gr.Examples(
        examples=example_files, 
        inputs=[input_image], 
        outputs=[depth_image_slider, concat_file, raw_file], 
        fn=on_submit
    )


# Entry point: enable request queuing (required for long-running GPU jobs)
# and expose a public share link.
if __name__ == '__main__':
    demo.queue().launch(share=True)