Spaces: Runtime error
upload app
- .gitattributes +3 -0
- app.py +17 -0
- assets/examples/Jordi_cat_portrait.jpg +3 -0
- assets/examples/cubes.png +3 -0
- assets/examples/girl_on_desk.jpg +3 -0
- assets/examples/girl_praying.jpg +3 -0
- assets/examples/tiger.jpg +3 -0
- model.py +37 -0
- requirements.txt +6 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
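These three added patterns route every .jpg, .jpeg, and .png file through Git LFS. That is why each example image in this commit counts as +3 -0 in the file list above: the three added lines are its LFS pointer (version, oid, and size), not the image data itself.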
app.py
ADDED
@@ -0,0 +1,17 @@
+import os
+import gradio as gr
+from model import predict
+
+examples_dir = "assets/examples/"
+examples = [[os.path.join(examples_dir, filename)] for filename in os.listdir(examples_dir)]
+
+interface = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil"),
+    outputs=gr.Image(type="pil"),
+    title="DepthPro: Monocular Depth Estimation",
+    examples=examples,
+)
+
+if __name__ == "__main__":
+    interface.launch()
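app.py wires predict into a standard Gradio Interface: a PIL image in, a PIL image out, with every file in assets/examples/ listed as a clickable example. As a minimal local smoke test (a sketch assuming the pinned requirements are installed and a CUDA GPU is available, since model.py pins the model to 'cuda'), the pipeline can be exercised without the UI:

# hypothetical smoke test, not part of this commit
from PIL import Image
from model import predict

img = Image.open("assets/examples/tiger.jpg")
depth_map = predict(img)           # returns the normalized depth map as a PIL image
depth_map.save("tiger_depth.png")  # inspect the result offline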
assets/examples/Jordi_cat_portrait.jpg
ADDED
Git LFS Details

assets/examples/cubes.png
ADDED
Git LFS Details

assets/examples/girl_on_desk.jpg
ADDED
Git LFS Details

assets/examples/girl_praying.jpg
ADDED
Git LFS Details

assets/examples/tiger.jpg
ADDED
Git LFS Details
model.py
ADDED
@@ -0,0 +1,37 @@
+from PIL import Image
+import torch
+
+# custom installation from this PR: https://github.com/huggingface/transformers/pull/34583
+# !pip install git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
+from transformers import DepthProImageProcessorFast, DepthProForDepthEstimation
+
+# initialize processor and model
+checkpoint = "geetu040/DepthPro"
+revision = "project"
+image_processor = DepthProImageProcessorFast.from_pretrained(checkpoint, revision=revision)
+model = DepthProForDepthEstimation.from_pretrained(checkpoint, revision=revision)
+model = model.to('cuda')
+
+def predict(image):
+    # inference
+
+    # prepare image for the model
+    inputs = image_processor(images=image, return_tensors="pt")
+    inputs = {k: v.to('cuda') for k, v in inputs.items()}
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    # interpolate to original size
+    post_processed_output = image_processor.post_process_depth_estimation(
+        outputs, target_sizes=[(image.height, image.width)],
+    )
+
+    # visualize the prediction
+    depth = post_processed_output[0]["predicted_depth"]
+    depth = (depth - depth.min()) / depth.max()
+    depth = depth * 255.
+    depth = depth.detach().cpu().numpy()
+    depth = Image.fromarray(depth.astype("uint8"))
+
+    return depth
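Two details in predict are worth flagging. model = model.to('cuda') runs unconditionally at import time, so the app crashes on CPU-only Space hardware, which is consistent with the Runtime error status shown above. And (depth - depth.min()) / depth.max() only maps the depth map onto [0, 1] when its minimum is zero. A device-agnostic variant with plain min-max normalization (a hypothetical fix, not part of this commit) could look like:

import torch
from PIL import Image

# hypothetical fix, not part of this commit: fall back to CPU when no GPU is present
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

def predict(image):
    # prepare image for the model
    inputs = image_processor(images=image, return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)

    # interpolate back to the input resolution
    post_processed_output = image_processor.post_process_depth_estimation(
        outputs, target_sizes=[(image.height, image.width)],
    )

    # min-max normalize so the output spans the full [0, 255] range
    depth = post_processed_output[0]["predicted_depth"]
    depth = (depth - depth.min()) / (depth.max() - depth.min())
    depth = (depth * 255.0).cpu().numpy()
    return Image.fromarray(depth.astype("uint8"))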
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+gradio
+numpy
+pillow
+torch
+torchvision
+git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
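The last line installs transformers from the same fork branch that model.py's comment references (the DepthPro PR, huggingface/transformers#34583), so the Space gets DepthProForDepthEstimation and DepthProImageProcessorFast before they ship in an official release. None of the other dependencies carry version bounds, so a later rebuild of the Space may pick up incompatible releases.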