---
license: mit
---

# DepthPro: Image Super Resolution

This work is part of the [DepthPro: Beyond Depth Estimation](https://github.com/geetu040/depthpro-beyond-depth) repository, which further explores the model's capabilities on:

- Image Segmentation - Human Segmentation
- Image Super Resolution - 384px to 1536px (4x Upscaling)
- Image Super Resolution - 256px to 1024px (4x Upscaling)

This checkpoint is the 4x super-resolution variant that upscales 384px inputs to 1536px.

# Usage

Install the required libraries:
```bash
pip install -q numpy pillow torch torchvision
pip install -q git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
```
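
Optionally, confirm the custom branch installed correctly; the DepthPro classes should import without error (a minimal check, not required for the rest of the walkthrough):
```py
# quick check that the custom transformers branch provides the DepthPro classes
import transformers
from transformers import DepthProForDepthEstimation
print(transformers.__version__)
```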
Import the required libraries:
```py
import requests
from PIL import Image
import torch
from huggingface_hub import hf_hub_download
import matplotlib.pyplot as plt

# custom installation from this PR: https://github.com/huggingface/transformers/pull/34583
# !pip install git+https://github.com/geetu040/transformers.git@depth-pro-projects#egg=transformers
from transformers import DepthProConfig, DepthProImageProcessorFast, DepthProForDepthEstimation
```

Load the DepthProForDepthEstimation model, which serves as the backbone:
```py
# configure and load the DepthPro model, used as the backbone
config = DepthProConfig(
    patch_size=192,
    patch_embeddings_size=16,
    num_hidden_layers=12,
    intermediate_hook_ids=[11, 8, 7, 5],
    intermediate_feature_dims=[256, 256, 256, 256],
    scaled_images_ratios=[0.5, 1.0],
    scaled_images_overlap_ratios=[0.5, 0.25],
    scaled_images_feature_dims=[1024, 512],
    use_fov_model=False,
)
depthpro_for_depth_estimation = DepthProForDepthEstimation(config)
```
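
Optionally, print how many parameters this configuration produces; this is a quick sketch using only standard PyTorch, and the exact number depends on the config above:
```py
# optional: report the backbone size implied by the config above
n_params = sum(p.numel() for p in depthpro_for_depth_estimation.parameters())
print(f"backbone parameters: {n_params / 1e6:.1f}M")
```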

Create the DepthProForSuperResolution model. It runs the DepthPro backbone and fusion stage to obtain upsampled features, adds a learned projection of the raw image, and decodes the sum back to image channels at 4x the input resolution:
```py
# create DepthPro for super resolution
class DepthProForSuperResolution(torch.nn.Module):
    def __init__(self, depthpro_for_depth_estimation):
        super().__init__()

        self.depthpro_for_depth_estimation = depthpro_for_depth_estimation
        hidden_size = self.depthpro_for_depth_estimation.config.fusion_hidden_size

        # projects the low-resolution image into the fusion feature space at 2x resolution
        self.image_head = torch.nn.Sequential(
            torch.nn.ConvTranspose2d(
                in_channels=self.depthpro_for_depth_estimation.config.num_channels,
                out_channels=hidden_size,
                kernel_size=4, stride=2, padding=1
            ),
            torch.nn.ReLU(),
        )

        # upsamples the combined features by another 2x and maps them back to image channels
        self.head = torch.nn.Sequential(
            torch.nn.Conv2d(
                in_channels=hidden_size,
                out_channels=hidden_size,
                kernel_size=3, stride=1, padding=1
            ),
            torch.nn.ReLU(),
            torch.nn.ConvTranspose2d(
                in_channels=hidden_size,
                out_channels=hidden_size,
                kernel_size=4, stride=2, padding=1
            ),
            torch.nn.ReLU(),
            torch.nn.Conv2d(
                in_channels=hidden_size,
                out_channels=self.depthpro_for_depth_estimation.config.num_channels,
                kernel_size=3, stride=1, padding=1
            ),
        )

    def forward(self, pixel_values):
        # pixel_values is the low-resolution image
        x = pixel_values
        encoder_features = self.depthpro_for_depth_estimation.depth_pro(x).features
        fused_hidden_state = self.depthpro_for_depth_estimation.fusion_stage(encoder_features)[-1]
        x = self.image_head(x)
        x = torch.nn.functional.interpolate(x, size=fused_hidden_state.shape[2:])
        x = x + fused_hidden_state
        x = self.head(x)
        return x
```

Load the model and image processor, and define a helper that center-crops images to a square:
```py
# initialize the model
model = DepthProForSuperResolution(depthpro_for_depth_estimation)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# load weights
weights_path = hf_hub_download(repo_id="geetu040/DepthPro_SR_4x_384p", filename="model_weights.pth")
model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu')))

# load image processor
image_processor = DepthProImageProcessorFast(
    do_resize=True,
    size={"width": 384, "height": 384},
    do_rescale=True,
    do_normalize=True
)

# define a crop function to ensure a square image
def crop_image(image):
    """Crops the image from the center to make the aspect ratio 1:1."""
    width, height = image.size
    min_dim = min(width, height)
    left = (width - min_dim) // 2
    top = (height - min_dim) // 2
    right = left + min_dim
    bottom = top + min_dim
    image = image.crop((left, top, right, bottom))
    return image
```
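
Before running on a real image, you can verify the upscaling factor with a random input. This is a hypothetical sanity check: if the fused features come out at twice the input resolution, as the 4x factor implies, a 384px input should produce a 1536px output:
```py
# sanity check: a 384px input should come out 4x larger
dummy = torch.randn(1, 3, 384, 384).to(device)
with torch.no_grad():
    # if the 4x factor holds, this prints torch.Size([1, 3, 1536, 1536])
    print(model(dummy).shape)
```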

Inference:
```py
# load and prepare the input image
url = "https://huggingface.co/spaces/geetu040/DepthPro_SR_4x_384p/resolve/main/assets/examples/man_with_arms_open.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
image = crop_image(image)
image = image.resize((384, 384), Image.Resampling.BICUBIC)

# prepare image for the model
inputs = image_processor(images=image, return_tensors="pt")
inputs = {k: v.to(device) for k, v in inputs.items()}

with torch.no_grad():
    outputs = model(**inputs)

# convert tensors to PIL.Image
output = outputs[0]                        # extract the first and only batch
output = output.cpu()                      # move off the GPU if one was used
output = torch.permute(output, (1, 2, 0))  # (C, H, W) -> (H, W, C)
output = output * 0.5 + 0.5                # undo normalization
output = output * 255.                     # undo rescaling
output = output.clip(0, 255.)              # clamp out-of-range values
output = output.numpy()                    # convert to numpy
output = output.astype('uint8')            # convert to a PIL.Image-compatible dtype
output = Image.fromarray(output)           # create the PIL.Image object

# visualize the prediction
fig, axes = plt.subplots(1, 2, figsize=(20, 20))
axes[0].imshow(image)
axes[0].set_title(f'Low-Resolution (LR) {image.size}')
axes[0].axis('off')
axes[1].imshow(output)
axes[1].set_title(f'Super-Resolution (SR) {output.size}')
axes[1].axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
```
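
To keep the result, the upscaled `PIL.Image` can be saved directly (the filename here is just an example):
```py
# save the super-resolved image to disk (example filename)
output.save("man_with_arms_open_sr.png")
```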