| | import gradio as gr |
| | import torch |
| | import copy |
| |
|
| | from gradio_depth_pred import create_demo as create_depth_pred_demo |
| |
|
| | from EcoDepth.utils import download_model |
| |
|
# Fetch both pretrained checkpoints (indoor and outdoor) up front so the
# model-loading code below can read them from disk.
for _ckpt_name in ("weights_indoor.ckpt", "weights_outdoor.ckpt"):
    download_model(_ckpt_name)
| |
|
# CSS injected into the Gradio page: caps the image container at ~half the
# viewport height and centers both the input and output images (each limited
# to 40% of the viewport height, width scaled to preserve aspect ratio).
css = """
#img-display-container {
    max-height: 50vh;
}

/* Center the INPUT image */
#img-display-input {
    display: flex !important;
    align-items: center !important;
    justify-content: center !important;
}

#img-display-input img {
    max-height: 40vh !important;
    width: auto !important;
    object-fit: contain !important;
}

/* Center the OUTPUT image */
#img-display-output {
    display: flex !important;
    align-items: center !important;
    justify-content: center !important;
}

#img-display-output img {
    max-height: 40vh !important;
    width: auto !important;
    object-fit: contain !important;
}
"""
| |
|
| | import json |
| | from EcoDepth.model import EcoDepth |
| |
|
class Args:
    """Namespace-like container populated from a JSON config file.

    Every top-level key in the JSON object becomes an attribute on the
    instance, so downstream code can read values such as ``args.ckpt_path``.
    """

    def __init__(self, config_path="infer_config.json"):
        """Load a JSON config and set one attribute per top-level key.

        Args:
            config_path: Path to the JSON config file. Defaults to
                ``"infer_config.json"`` to preserve the original behavior.

        Raises:
            FileNotFoundError: If ``config_path`` does not exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        with open(config_path, "r") as f:
            config = json.load(f)
        for name, value in config.items():
            setattr(self, name, value)
| |
|
| | |
args = Args()


def _build_model(base_args, no_of_classes, max_depth, ckpt_name):
    """Instantiate an EcoDepth model for one scene type and load its weights.

    Args:
        base_args: Shared ``Args`` instance; deep-copied so the per-scene
            overrides below do not leak into the other model's config.
        no_of_classes: Scene-specific class-count override.
        max_depth: Maximum depth value for this scene type.
        ckpt_name: Checkpoint filename under ``base_args.ckpt_path``.

    Returns:
        The ``EcoDepth`` model in eval mode with the checkpoint loaded.
    """
    model_args = copy.deepcopy(base_args)
    model_args.no_of_classes = no_of_classes
    model_args.max_depth = max_depth
    model = _EcoDepthEval = EcoDepth(model_args).eval()
    # weights_only=True restricts torch.load to tensors/plain containers,
    # avoiding arbitrary code execution from an untrusted pickle.
    state = torch.load(
        f"{base_args.ckpt_path}/{ckpt_name}",
        map_location="cpu",
        weights_only=True,
    )
    model.load_state_dict(state["state_dict"])
    return model


# Indoor and outdoor use different class counts and depth ranges.
model_indoor = _build_model(args, 100, 10, "weights_indoor.ckpt")
model_outdoor = _build_model(args, 200, 80, "weights_outdoor.ckpt")
| |
|
# Markdown rendered at the top of the demo page.
title = "# ECoDepth"
description = """Official demo for **ECoDepth: Effective Conditioning of Diffusion Models for Monocular Depth Estimation**.

EcoDepth is a deep learning model for metric depth estimation from a single image.

Please refer to our [paper](https://arxiv.org/abs/2403.18807) or [github](https://github.com/Aradhye2002/EcoDepth) for more details."""
| |
|
# Assemble the Gradio UI: header markdown, then one tab per scene type,
# each backed by its own pretrained model.
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Tab("Indoor Depth Prediction (v1)"):
        create_depth_pred_demo(model_indoor, scene="indoor")
    with gr.Tab("Outdoor Depth Prediction (v1)"):
        create_depth_pred_demo(model_outdoor, scene="outdoor")

    # NOTE(review): source indentation was lost in extraction; this footer is
    # placed inside the Blocks context so it renders on the page — confirm
    # against the original layout.
    gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/aradhye/EcoDepth?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br></center>''')
| |
|
if __name__ == '__main__':
    # Enable Gradio's request queue before launching the app.
    demo.queue().launch()