ArianatorQualquer committed
Commit ed9645c · verified · 1 Parent(s): 9dae338

Create aye.py

Files changed (1): aye.py (+88, -0)
aye.py ADDED
@@ -0,0 +1,88 @@
import glob
import tempfile

import gradio as gr
import matplotlib
import numpy as np
import torch
from PIL import Image
from gradio_imageslider import ImageSlider

from depth_anything_v2.dpt import DepthAnythingV2

# Keep the input/output images within the viewport and size the download buttons.
css = """
#img-display-container {
    max-height: 100vh;
}
#img-display-input {
    max-height: 80vh;
}
#img-display-output {
    max-height: 80vh;
}
#download {
    height: 62px;
}
"""
# Prefer CUDA, then Apple-silicon MPS, then fall back to CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

# DPT decoder configurations for the four Depth Anything V2 encoder sizes.
model_configs = {
    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
}
encoder = 'vitl'
model = DepthAnythingV2(**model_configs[encoder])
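# NOTE: the vitl checkpoint is expected at checkpoints/depth_anything_v2_vitl.pth and
# is not part of this commit. A minimal sketch for fetching it beforehand, assuming the
# official depth-anything/Depth-Anything-V2-Large repo on the Hugging Face Hub:
#   from huggingface_hub import hf_hub_download
#   hf_hub_download(repo_id='depth-anything/Depth-Anything-V2-Large',
#                   filename='depth_anything_v2_vitl.pth', local_dir='checkpoints')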
state_dict = torch.load(f'checkpoints/depth_anything_v2_{encoder}.pth', map_location="cpu")
model.load_state_dict(state_dict)
model = model.to(DEVICE).eval()

title = "# Depth Anything V2"
description = """Official demo for **Depth Anything V2**.
Please refer to our [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), or [github](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""

def predict_depth(image):
    return model.infer_image(image)

with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Depth Prediction demo")

    with gr.Row():
        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
    submit = gr.Button(value="Compute Depth")
    gray_depth_file = gr.File(label="Grayscale depth map", elem_id="download")
    raw_file = gr.File(label="16-bit raw output (can be considered as disparity)", elem_id="download")

    cmap = matplotlib.colormaps.get_cmap('Spectral_r')

    def on_submit(image):
        original_image = image.copy()

        # Gradio delivers RGB; the model expects BGR (OpenCV convention), so flip the channels.
        depth = predict_depth(image[:, :, ::-1])
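        # Depth Anything V2 predicts relative, disparity-like depth (hence the
        # "can be considered as disparity" label on the 16-bit download above).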

        # Export the raw prediction as a 16-bit PNG (float values are truncated to integers).
        raw_depth = Image.fromarray(depth.astype('uint16'))
        tmp_raw_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        raw_depth.save(tmp_raw_depth.name)

        # Normalize to [0, 255] for display and colorize with the Spectral_r colormap.
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.astype(np.uint8)
        colored_depth = (cmap(depth)[:, :, :3] * 255).astype(np.uint8)

        gray_depth = Image.fromarray(depth)
        tmp_gray_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        gray_depth.save(tmp_gray_depth.name)

        # Slider pair (original vs. colored depth) plus the two downloadable files.
        return [(original_image, colored_depth), tmp_gray_depth.name, tmp_raw_depth.name]

    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file])

    example_files = glob.glob('assets/examples/*')
    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file], fn=on_submit)


if __name__ == '__main__':
    demo.queue().launch(debug=True, share=True)
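
For reference, a minimal sketch of running the same model headlessly, without the Gradio UI. It assumes the checkpoint from the note above is in place; the input path is hypothetical, and infer_image is the same API the demo calls:

import cv2
import torch
from depth_anything_v2.dpt import DepthAnythingV2

model = DepthAnythingV2(encoder='vitl', features=256, out_channels=[256, 512, 1024, 1024])
model.load_state_dict(torch.load('checkpoints/depth_anything_v2_vitl.pth', map_location='cpu'))
model = model.eval()

raw_img = cv2.imread('your_image.jpg')  # BGR HxWx3 uint8, as infer_image expects
depth = model.infer_image(raw_img)      # HxW float32 relative depth map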