import gradio as gr
import cv2
import matplotlib
import numpy as np
import os
from PIL import Image
import spaces
import torch
import tempfile
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
from depth_anything_v2.dpt import DepthAnythingV2

css = """
#img-display-container {
    max-height: 100vh;
}
#img-display-input {
    max-height: 80vh;
}
#img-display-output {
    max-height: 80vh;
}
#download {
    height: 62px;
}
"""

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
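
# DPT decoder settings for each DINOv2 encoder size ('features' is roughly the decoder
# width, 'out_channels' the per-stage channel counts); only the encoder chosen below is loaded.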
model_configs = {
    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
}
encoder2name = {
    'vits': 'Small',
    'vitb': 'Base',
    'vitl': 'Large',
    'vitg': 'Giant',  # we are undergoing company review procedures to release our giant model checkpoint
}
encoder = 'vitl'
model_name = encoder2name[encoder]
model = DepthAnythingV2(**model_configs[encoder])
filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model")
state_dict = torch.load(filepath, map_location="cpu")
model.load_state_dict(state_dict)
model = model.to(DEVICE).eval()
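

# Convert a grayscale depth image (treated as a height field) into a tangent-space
# normal map by differentiating it; gray_depth is a path to a PNG such as the one
# written by on_submit below.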
def findNormals(gray_depth, format):
    d_im = cv2.cvtColor(cv2.imread(gray_depth).astype(np.uint8), cv2.COLOR_BGR2GRAY)
    zy, zx = np.gradient(d_im)
    # You may also consider using Sobel to get a joint Gaussian smoothing and differentiation
    # to reduce noise
    #zx = cv2.Sobel(d_im, cv2.CV_64F, 1, 0, ksize=5)
    #zy = cv2.Sobel(d_im, cv2.CV_64F, 0, 1, ksize=5)
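
    # OpenGL and DirectX normal maps differ only in the sign convention of the
    # green (Y) channel, so flipping zy is enough to switch between the two.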
    if format == "opengl":
        zy = -zy
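
    # Treat the depth image as a height field z(x, y): the (unnormalized) surface
    # normal is (1, -dz/dy, -dz/dx), stored here in (z, y, x) channel order; the
    # channel reversal on return puts it back into (x, y, z) order for the output image.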
    normal = np.dstack((np.ones_like(d_im), -zy, -zx))
    n = np.linalg.norm(normal, axis=2)
    normal[:, :, 0] /= n
    normal[:, :, 1] /= n
    normal[:, :, 2] /= n

    # offset and rescale values to be in 0-255
    normal += 1
    normal /= 2
    normal *= 255

    return (normal[:, :, ::-1]).astype(np.uint8)
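
# Example usage (hypothetical path): findNormals("gray_depth.png", "opengl")
# returns an HxWx3 uint8 normal map that can be shown directly in a gr.Image.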

title = "# Depth Anything V2"
description = """Unofficial demo for **Depth Anything V2**.
Please refer to their [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), and [GitHub](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""
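

# infer_image expects an OpenCV-style BGR uint8 array and returns an HxW float
# map of relative depth (disparity-like) at the input resolution.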
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call (the reason `spaces` is imported)
def predict_depth(image):
    return model.infer_image(image)


with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Depth Prediction demo")

    with gr.Row():
        input_image = gr.ImageEditor(label="Input Image", layers=True, sources=('upload', 'clipboard'), show_download_button=True, type="numpy", interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(default_size=1, colors=['black', '#505050', '#a0a0a0', 'white']), elem_id="img-display-input")
        with gr.Tab("Depth"):
            depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
            orig_image_file = gr.File(label="Original image", elem_id="original")
            gray_depth_file = gr.File(label="Grayscale depth map", elem_id="download")
            submit = gr.Button(value="Compute Depth")
        with gr.Tab("Normals"):
            normals_out = gr.Image(label="Normal map", interactive=False)
            format_normals = gr.Radio(choices=["directx", "opengl"])
            find_normals = gr.Button("Find normals")
            find_normals.click(fn=findNormals, inputs=[gray_depth_file, format_normals], outputs=[normals_out])

    raw_file = gr.File(label="16-bit raw output (can be considered as disparity)", elem_id="download")
    cmap = matplotlib.colormaps.get_cmap('Spectral_r')
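
    # The ImageEditor passes a dict; "composite" is the background image with any
    # drawn layers flattened on top, as an RGBA numpy array.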
    def on_submit(img_d):
        image = cv2.cvtColor(img_d["composite"], cv2.COLOR_RGBA2RGB)
        original_image = image.copy()
        h, w = image.shape[:2]
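
        # reverse the channel order (RGB -> BGR) before predicting, since
        # infer_image follows OpenCV's BGR convention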
        depth = predict_depth(image[:, :, ::-1])

        raw_depth = Image.fromarray(depth.astype('uint16'))
        tmp_raw_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        raw_depth.save(tmp_raw_depth.name)
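
        # min-max normalize to 0-255 for display and colorize it with matplotlib's 'Spectral_r' colormap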
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.astype(np.uint8)
        colored_depth = (cmap(depth)[:, :, :3] * 255).astype(np.uint8)

        gray_depth = Image.fromarray(depth)
        tmp_gray_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        gray_depth.save(tmp_gray_depth.name)

        orig_image = Image.fromarray(image)
        tmp_orig_image = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        orig_image.save(tmp_orig_image.name)

        return [(original_image, colored_depth), tmp_orig_image.name, tmp_gray_depth.name, tmp_raw_depth.name]

    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, orig_image_file, gray_depth_file, raw_file])

    example_files = os.listdir('assets/drawn_examples')
    example_files.sort()
    example_files = [os.path.join('assets/drawn_examples', filename) for filename in example_files]
    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, orig_image_file, gray_depth_file, raw_file], fn=on_submit)


if __name__ == '__main__':
    demo.queue().launch(share=True)