ZenosArrows committed on
Commit
f9924d6
·
verified ·
1 Parent(s): 150ce55

Download model checkpoint from HF hub

Browse files
Files changed (1) hide show
  1. app.py +13 -3
app.py CHANGED
@@ -1,9 +1,11 @@
1
  import glob
2
  import gradio as gr
3
  import numpy as np
 
4
  import torch
5
  import tempfile
6
  import uuid
 
7
  from PIL import Image, ImageOps, ImageEnhance
8
  from pathlib import Path
9
  from zipfile import ZipFile, is_zipfile
@@ -81,6 +83,12 @@ model_configs = {
81
  'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
82
  'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
83
  }
 
 
 
 
 
 
84
 
85
  title = "# Depth Anything V2"
86
  description = """Looking Glass demo for **Depth Anything V2**.
@@ -113,9 +121,11 @@ def upscale_image(image, model, background, discard_alpha):
113
  image = model.infer(image)
114
  return image.convert("RGB") if discard_alpha else image
115
 
116
- def on_submit(image, batch_images, book, config, upscale_model, upscale_method, denoise_level, discard_alpha, progress=gr.Progress()):
117
- model = DepthAnythingV2(**model_configs[config])
118
- state_dict = torch.load(f'checkpoints/depth_anything_v2_{config}.pth', map_location="cpu")
 
 
119
  model.load_state_dict(state_dict)
120
  model = model.to(DEVICE).eval()
121
 
 
1
  import glob
2
  import gradio as gr
3
  import numpy as np
4
+ import spaces
5
  import torch
6
  import tempfile
7
  import uuid
8
+ from huggingface_hub import hf_hub_download
9
  from PIL import Image, ImageOps, ImageEnhance
10
  from pathlib import Path
11
  from zipfile import ZipFile, is_zipfile
 
83
  'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
84
  'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
85
  }
86
+ encoder2name = {
87
+ 'vits': 'Small',
88
+ 'vitb': 'Base',
89
+ 'vitl': 'Large',
90
+ 'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint
91
+ }
92
 
93
  title = "# Depth Anything V2"
94
  description = """Looking Glass demo for **Depth Anything V2**.
 
121
  image = model.infer(image)
122
  return image.convert("RGB") if discard_alpha else image
123
 
124
+ def on_submit(image, batch_images, book, encoder, upscale_model, upscale_method, denoise_level, discard_alpha, progress=gr.Progress()):
125
+ model_name = encoder2name[encoder]
126
+ model = DepthAnythingV2(**model_configs[encoder])
127
+ filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model")
128
+ state_dict = torch.load(filepath, map_location="cpu")
129
  model.load_state_dict(state_dict)
130
  model = model.to(DEVICE).eval()
131