niye4 committed
Commit 2b2c9f7 · verified · 1 Parent(s): 7a15f8c

Update app.py

Files changed (1)
  1. app.py +52 -83
app.py CHANGED
@@ -1,88 +1,57 @@
-import glob
+import os
+import subprocess
 import gradio as gr
-import matplotlib
-import numpy as np
+from gradio_imageslider import ImageSlider
 from PIL import Image
-import torch
+import glob
 import tempfile
-from gradio_imageslider import ImageSlider
-
-from depth_anything_v2.dpt import DepthAnythingV2
-
-css = """
-#img-display-container {
-    max-height: 100vh;
-}
-#img-display-input {
-    max-height: 80vh;
-}
-#img-display-output {
-    max-height: 80vh;
-}
-#download {
-    height: 62px;
-}
-"""
-DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
-model_configs = {
-    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
-    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
-    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
-    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
-}
-encoder = 'vitl'
-model = DepthAnythingV2(**model_configs[encoder])
-state_dict = torch.load(f'checkpoints/depth_anything_v2_{encoder}.pth', map_location="cpu")
-model.load_state_dict(state_dict)
-model = model.to(DEVICE).eval()
-
-title = "# Depth Anything V2"
-description = """Official demo for **Depth Anything V2**.
-Please refer to our [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), or [github](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""
-
-def predict_depth(image):
-    return model.infer_image(image)
-
-with gr.Blocks(css=css) as demo:
-    gr.Markdown(title)
-    gr.Markdown(description)
-    gr.Markdown("### Depth Prediction demo")
-
-    with gr.Row():
-        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
-        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
-    submit = gr.Button(value="Compute Depth")
-    gray_depth_file = gr.File(label="Grayscale depth map", elem_id="download",)
-    raw_file = gr.File(label="16-bit raw output (can be considered as disparity)", elem_id="download",)
-
-    cmap = matplotlib.colormaps.get_cmap('Spectral_r')
-
-    def on_submit(image):
-        original_image = image.copy()
-
-        h, w = image.shape[:2]
-
-        depth = predict_depth(image[:, :, ::-1])
-
-        raw_depth = Image.fromarray(depth.astype('uint16'))
-        tmp_raw_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
-        raw_depth.save(tmp_raw_depth.name)
-
-        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
-        depth = depth.astype(np.uint8)
-        colored_depth = (cmap(depth)[:, :, :3] * 255).astype(np.uint8)
-
-        gray_depth = Image.fromarray(depth)
-        tmp_gray_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
-        gray_depth.save(tmp_gray_depth.name)
-
-        return [(original_image, colored_depth), tmp_gray_depth.name, tmp_raw_depth.name]
-
-    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file])
-
-    example_files = glob.glob('assets/examples/*')
-    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file], fn=on_submit)
-
 
-if __name__ == '__main__':
+# Output folder
+OUTPUT_DIR = "output"
+os.makedirs(OUTPUT_DIR, exist_ok=True)
+
+def process_video(video_file):
+    """
+    Run DepthAnythingV2 run_video.py CLI to process the uploaded MP4 video
+    and return both a slider view (per-frame depth map) and the output video.
+    """
+    video_path = video_file.name
+
+    # Call run_video.py using subprocess
+    cmd = [
+        "python", "run_video.py",
+        "--encoder", "vitb",
+        "--video-path", video_path,
+        "--outdir", OUTPUT_DIR,
+        "--grayscale",
+        "--pred-only"
+    ]
+    subprocess.run(cmd, check=True)
+
+    # run_video.py output video filename: <input>_depth.mp4
+    base_name = os.path.basename(video_path)
+    output_video = os.path.join(OUTPUT_DIR, base_name.replace(".mp4", "_depth.mp4"))
+
+    # Prepare slider images for per-frame preview
+    # Assume run_video.py saves frames as PNG in output folder
+    slider_images = []
+    frame_files = sorted(glob.glob(os.path.join(OUTPUT_DIR, "*.png")))
+    for f in frame_files:
+        slider_images.append(Image.open(f))
+
+    return slider_images, output_video
+
+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("# Depth Anything V2 - Video Demo")
+    gr.Markdown("Upload an MP4 video, and the system will automatically generate a DepthMap video.")
+
+    video_input = gr.File(label="Upload MP4", file_types=['.mp4'])
+    depth_slider = ImageSlider(label="Depth Map Slider")  # Slider view of frames
+    video_output = gr.Video(label="DepthMap Video")
+    submit = gr.Button("Render DepthMap")
+
+    submit.click(fn=process_video, inputs=[video_input], outputs=[depth_slider, video_output])
+
+if __name__ == "__main__":
     demo.queue().launch()
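
Two details of the new process_video are worth flagging. It reads video_file.name, which works when Gradio hands the handler a temp-file object but fails when gr.File delivers a plain filepath string (the default in recent Gradio releases). It also globs every *.png in the shared output folder, so frames left over from a previous upload would be mixed into the next preview; note too that the image demo this commit replaces fed ImageSlider a single (input, output) pair rather than a list of frames, so the slider wiring may need a second look. Below is a minimal defensive sketch, not part of the commit: it assumes the same run_video.py flags and the <input>_depth.mp4 output naming that the committed code already relies on, and the name process_video_safe plus the per-run subdirectory are illustrative only.

import glob
import os
import subprocess
import tempfile

def process_video_safe(video_file, outdir="output"):
    # Accept either a plain path string or a file-like object with .name
    video_path = video_file if isinstance(video_file, str) else video_file.name

    # Give each run its own subdirectory so stale PNGs from an earlier
    # video cannot leak into this run's frame preview.
    os.makedirs(outdir, exist_ok=True)
    run_dir = tempfile.mkdtemp(dir=outdir)

    # Same CLI call as the committed app.py, equivalent to:
    #   python run_video.py --encoder vitb --video-path <input>.mp4 \
    #       --outdir <run_dir> --grayscale --pred-only
    subprocess.run([
        "python", "run_video.py",
        "--encoder", "vitb",
        "--video-path", video_path,
        "--outdir", run_dir,
        "--grayscale",
        "--pred-only",
    ], check=True)

    # Output naming assumed by the commit: <input>_depth.mp4
    base = os.path.splitext(os.path.basename(video_path))[0]
    output_video = os.path.join(run_dir, base + "_depth.mp4")

    # Only this run's frames can appear here
    frame_files = sorted(glob.glob(os.path.join(run_dir, "*.png")))
    return frame_files, output_video

Dropping this in for process_video keeps the same click wiring; the returned paths can be opened with Image.open exactly as the committed loop does.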