WhiteAiZ committed on
Commit 0a0b7b5 · verified · 1 parent: f1a2653

Delete extensions-builtin/forge_preprocessor_marigold

extensions-builtin/forge_preprocessor_marigold/marigold/model/__init__.py DELETED
File without changes (empty file)
extensions-builtin/forge_preprocessor_marigold/marigold/model/marigold_pipeline.py DELETED
@@ -1,313 +0,0 @@
- # Author: Bingxin Ke
- # Last modified: 2023-12-11
-
- import logging
- from typing import Dict
-
- import numpy as np
- import torch
- from diffusers import (
-     DDIMScheduler,
-     DDPMScheduler,
-     PNDMScheduler,
-     DEISMultistepScheduler,
-     SchedulerMixin,
-     UNet2DConditionModel,
- )
- from torch import nn
- from torch.nn import Conv2d
- from torch.nn.parameter import Parameter
- from tqdm.auto import tqdm
- from transformers import CLIPTextModel, CLIPTokenizer
-
- from .rgb_encoder import RGBEncoder
- from .stacked_depth_AE import StackedDepthAE
-
-
- class MarigoldPipeline(nn.Module):
-     """
-     Marigold monocular depth estimator.
-     """
-
-     def __init__(
-         self,
-         unet_pretrained_path: Dict,  # {path: xxx, subfolder: xxx}
-         rgb_encoder_pretrained_path: Dict,
-         depth_ae_pretrained_path: Dict,
-         noise_scheduler_pretrained_path: Dict,
-         tokenizer_pretrained_path: Dict,
-         text_encoder_pretrained_path: Dict,
-         empty_text_embed=None,
-         trainable_unet=False,
-         rgb_latent_scale_factor=0.18215,
-         depth_latent_scale_factor=0.18215,
-         noise_scheduler_type=None,
-         enable_gradient_checkpointing=False,
-         enable_xformers=True,
-     ) -> None:
-         super().__init__()
-
-         self.rgb_latent_scale_factor = rgb_latent_scale_factor
-         self.depth_latent_scale_factor = depth_latent_scale_factor
-         self.device = "cpu"
-
-         # ******* Initialize modules *******
-         # Trainable modules
-         self.trainable_module_dic: Dict[str, nn.Module] = {}
-         self.trainable_unet = trainable_unet
-
-         # Denoising UNet
-         self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
-             unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
-         )
-         logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
-         if 8 != self.unet.config["in_channels"]:
-             self._replace_unet_conv_in()
-             logging.warning("UNet conv_in layer is replaced")
-         if enable_xformers:
-             self.unet.enable_xformers_memory_efficient_attention()
-         else:
-             self.unet.disable_xformers_memory_efficient_attention()
-
-         # Image encoder
-         self.rgb_encoder = RGBEncoder(
-             pretrained_path=rgb_encoder_pretrained_path["path"],
-             subfolder=rgb_encoder_pretrained_path["subfolder"],
-         )
-         logging.info(
-             f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}"
-         )
-         self.rgb_encoder.requires_grad_(False)
-
-         # Depth encoder-decoder
-         self.depth_ae = StackedDepthAE(
-             pretrained_path=depth_ae_pretrained_path["path"],
-             subfolder=depth_ae_pretrained_path["subfolder"],
-         )
-         logging.info(
-             f"pretrained Depth Autoencoder loaded from: {depth_ae_pretrained_path}"
-         )
-
-         # Trainability
-         # unet
-         if self.trainable_unet:
-             self.unet.requires_grad_(True)
-             self.trainable_module_dic["unet"] = self.unet
-             logging.debug("UNet is set to trainable")
-         else:
-             self.unet.requires_grad_(False)
-             logging.debug("UNet is set to frozen")
-
-         # Gradient checkpointing
-         if enable_gradient_checkpointing:
-             self.unet.enable_gradient_checkpointing()
-             self.depth_ae.vae.enable_gradient_checkpointing()
-
-         # Noise scheduler
-         if "DDPMScheduler" == noise_scheduler_type:
-             self.noise_scheduler: SchedulerMixin = DDPMScheduler.from_pretrained(
-                 noise_scheduler_pretrained_path["path"],
-                 subfolder=noise_scheduler_pretrained_path["subfolder"],
-             )
-         elif "DDIMScheduler" == noise_scheduler_type:
-             self.noise_scheduler: SchedulerMixin = DDIMScheduler.from_pretrained(
-                 noise_scheduler_pretrained_path["path"],
-                 subfolder=noise_scheduler_pretrained_path["subfolder"],
-             )
-         elif "PNDMScheduler" == noise_scheduler_type:
-             self.noise_scheduler: SchedulerMixin = PNDMScheduler.from_pretrained(
-                 noise_scheduler_pretrained_path["path"],
-                 subfolder=noise_scheduler_pretrained_path["subfolder"],
-             )
-         elif "DEISMultistepScheduler" == noise_scheduler_type:
-             self.noise_scheduler: SchedulerMixin = DEISMultistepScheduler.from_pretrained(
-                 noise_scheduler_pretrained_path["path"],
-                 subfolder=noise_scheduler_pretrained_path["subfolder"],
-             )
-         else:
-             raise NotImplementedError
-
-         # Text embed for empty prompt (always on CPU)
-         if empty_text_embed is None:
-             tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(
-                 tokenizer_pretrained_path["path"],
-                 subfolder=tokenizer_pretrained_path["subfolder"],
-             )
-             text_encoder: CLIPTextModel = CLIPTextModel.from_pretrained(
-                 text_encoder_pretrained_path["path"],
-                 subfolder=text_encoder_pretrained_path["subfolder"],
-             )
-             with torch.no_grad():
-                 self.empty_text_embed = self._encode_text(
-                     "", tokenizer, text_encoder
-                 ).detach()  # [1, 2, 1024]
-         else:
-             self.empty_text_embed = empty_text_embed
-
-     @staticmethod
-     def from_pretrained(pretrained_path, **kwargs):
-         return MarigoldPipeline(
-             unet_pretrained_path={"path": pretrained_path, "subfolder": "unet"},
-             rgb_encoder_pretrained_path={"path": pretrained_path, "subfolder": "vae"},
-             depth_ae_pretrained_path={"path": pretrained_path, "subfolder": "vae"},
-             noise_scheduler_pretrained_path={
-                 "path": pretrained_path,
-                 "subfolder": "scheduler",
-             },
-             tokenizer_pretrained_path={
-                 "path": pretrained_path,
-                 "subfolder": "tokenizer",
-             },
-             text_encoder_pretrained_path={
-                 "path": pretrained_path,
-                 "subfolder": "text_encoder",
-             },
-             **kwargs,
-         )
-
-     def _replace_unet_conv_in(self):
-         # Replace the first layer to accept 8 in_channels. Only applied when loading a pretrained SD U-Net.
-         _weight = self.unet.conv_in.weight.clone()  # [320, 4, 3, 3]
-         _bias = self.unet.conv_in.bias.clone()  # [320]
-         _weight = _weight.repeat((1, 2, 1, 1))  # duplicate the 4 input channels to 8
-         # half the activation magnitude
-         _weight *= 0.5
-         _bias *= 0.5
-         # new conv_in channel
-         _n_convin_out_channel = self.unet.conv_in.out_channels
-         _new_conv_in = Conv2d(
-             8, _n_convin_out_channel, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
-         )
-         _new_conv_in.weight = Parameter(_weight)
-         _new_conv_in.bias = Parameter(_bias)
-         self.unet.conv_in = _new_conv_in
-         # replace config
-         self.unet.config["in_channels"] = 8
-         return
-
-     def to(self, device):
-         self.rgb_encoder.to(device)
-         self.depth_ae.to(device)
-         self.unet.to(device)
-         self.empty_text_embed = self.empty_text_embed.to(device)
-         self.device = device
-         return self
-
-     def forward(
-         self,
-         rgb_in,
-         num_inference_steps: int = 50,
-         num_output_inter_results: int = 0,
-         show_pbar=False,
-         init_depth_latent=None,
-         return_depth_latent=False,
-     ):
-         device = rgb_in.device
-         precision = self.unet.dtype
-         # Set timesteps
-         self.noise_scheduler.set_timesteps(num_inference_steps, device=device)
-         timesteps = self.noise_scheduler.timesteps  # [T]
-
-         # Encode image
-         rgb_latent = self.encode_rgb(rgb_in)
-
-         # Initial depth map (noise)
-         if init_depth_latent is not None:
-             init_depth_latent = init_depth_latent.to(dtype=precision)
-             assert (
-                 init_depth_latent.shape == rgb_latent.shape
-             ), "initial depth latent should be of size [B, 4, H/8, W/8]"
-             depth_latent = init_depth_latent
-         else:
-             depth_latent = torch.randn(rgb_latent.shape, device=device)  # [B, 4, h, w]
-
-         # Expand text embedding for batch
-         batch_empty_text_embed = self.empty_text_embed.repeat(
-             (rgb_latent.shape[0], 1, 1)
-         ).to(device=device, dtype=precision)  # [B, 2, 1024]
-
-         # Export intermediate denoising steps
-         if num_output_inter_results > 0:
-             depth_latent_ls = []
-             inter_steps = []
-             _idx = (
-                 -1
-                 * (
-                     np.arange(0, num_output_inter_results)
-                     * num_inference_steps
-                     / num_output_inter_results
-                 )
-                 .round()
-                 .astype(int)
-                 - 1
-             )
-             steps_to_output = timesteps[_idx]
-
-         # Denoising loop
-         if show_pbar:
-             iterable = tqdm(enumerate(timesteps), total=len(timesteps), leave=False, desc="denoising")
-         else:
-             iterable = enumerate(timesteps)
-         for i, t in iterable:
-             unet_input = torch.cat(
-                 [rgb_latent, depth_latent], dim=1
-             )  # this order is important
-             unet_input = unet_input.to(dtype=precision)
-             # predict the noise residual
-             noise_pred = self.unet(
-                 unet_input, t, encoder_hidden_states=batch_empty_text_embed
-             ).sample  # [B, 4, h, w]
-             # compute the previous noisy sample x_t -> x_t-1
-             depth_latent = self.noise_scheduler.step(
-                 noise_pred, t, depth_latent
-             ).prev_sample.to(dtype=precision)
-
-             if num_output_inter_results > 0 and t in steps_to_output:
-                 depth_latent_ls.append(depth_latent.detach().clone())
-                 inter_steps.append(t - 1)
-
-         # Decode depth latent
-         if num_output_inter_results > 0:
-             assert 0 in inter_steps
-             depth = [self.decode_depth(lat) for lat in depth_latent_ls]
-             if return_depth_latent:
-                 return depth, inter_steps, depth_latent_ls
-             else:
-                 return depth, inter_steps
-         else:
-             depth = self.decode_depth(depth_latent)
-             if return_depth_latent:
-                 return depth, depth_latent
-             else:
-                 return depth
-
-     def encode_rgb(self, rgb_in):
-         rgb_latent = self.rgb_encoder(rgb_in)  # [B, 4, h, w]
-         rgb_latent = rgb_latent * self.rgb_latent_scale_factor
-         return rgb_latent
-
-     def encode_depth(self, depth_in):
-         depth_latent = self.depth_ae.encode(depth_in)
-         depth_latent = depth_latent * self.depth_latent_scale_factor
-         return depth_latent
-
-     def decode_depth(self, depth_latent):
-         depth_latent = depth_latent / self.depth_latent_scale_factor
-         depth = self.depth_ae.decode(depth_latent)  # [B, 1, H, W]
-         return depth
-
-     @staticmethod
-     def _encode_text(prompt, tokenizer, text_encoder):
-         text_inputs = tokenizer(
-             prompt,
-             padding="do_not_pad",
-             max_length=tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pt",
-         )
-         text_input_ids = text_inputs.input_ids.to(text_encoder.device)
-         text_embed = text_encoder(text_input_ids)[0]
-         return text_embed
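For orientation, a minimal usage sketch of the deleted pipeline (not part of the commit; it assumes the package is still importable from its original location, a CUDA device, and that the Bingxin/Marigold checkpoint used by the preprocessor script further down is reachable):

    import torch
    from marigold.model.marigold_pipeline import MarigoldPipeline

    # builds the UNet, both VAE wrappers, the scheduler and the empty-prompt CLIP embedding
    pipe = MarigoldPipeline.from_pretrained(
        "Bingxin/Marigold",
        noise_scheduler_type="DDIMScheduler",
        enable_xformers=False,
    ).to("cuda")

    rgb = torch.rand(1, 3, 512, 512, device="cuda") * 2.0 - 1.0  # dummy image in [-1, 1]
    with torch.no_grad():
        depth = pipe(rgb, num_inference_steps=20)  # affine-invariant depth, [1, 1, 512, 512]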
extensions-builtin/forge_preprocessor_marigold/marigold/model/rgb_encoder.py DELETED
@@ -1,36 +0,0 @@
- # Author: Bingxin Ke
- # Last modified: 2023-12-05
-
- import torch
- import torch.nn as nn
- import logging
- from diffusers import AutoencoderKL
-
-
- class RGBEncoder(nn.Module):
-     """
-     The encoder of pretrained Stable Diffusion VAE
-     """
-
-     def __init__(self, pretrained_path, subfolder=None) -> None:
-         super().__init__()
-
-         vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
-         logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
-
-         self.rgb_encoder = nn.Sequential(
-             vae.encoder,
-             vae.quant_conv,
-         )
-
-     def to(self, *args, **kwargs):
-         self.rgb_encoder.to(*args, **kwargs)
-
-     def forward(self, rgb_in):
-         return self.encode(rgb_in)
-
-     def encode(self, rgb_in):
-         moments = self.rgb_encoder(rgb_in)  # [B, 8, H/8, W/8]
-         mean, logvar = torch.chunk(moments, 2, dim=1)
-         rgb_latent = mean
-         return rgb_latent
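The encoder is deliberately deterministic: it keeps only the mean half of the VAE's posterior moments instead of sampling, so the same image always maps to the same latent. A minimal sketch, not part of the commit; the checkpoint name is an assumption (any diffusers AutoencoderKL with the usual "vae" subfolder layout would do):

    import torch
    from marigold.model.rgb_encoder import RGBEncoder

    encoder = RGBEncoder(pretrained_path="stabilityai/stable-diffusion-2", subfolder="vae")
    rgb = torch.rand(1, 3, 512, 512) * 2.0 - 1.0  # expected input range is [-1, 1]
    latent = encoder(rgb)  # [1, 4, 64, 64] posterior mean, before the 0.18215 scaling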
extensions-builtin/forge_preprocessor_marigold/marigold/model/stacked_depth_AE.py DELETED
@@ -1,52 +0,0 @@
- # Author: Bingxin Ke
- # Last modified: 2023-12-05
-
- import torch
- import torch.nn as nn
- import logging
- from diffusers import AutoencoderKL
-
-
- class StackedDepthAE(nn.Module):
-     """
-     Pretrained image VAE tailored to depth maps.
-     Encode: depth images are repeated into 3 channels.
-     Decode: the average of the 3 channels is taken as output.
-     """
-
-     def __init__(self, pretrained_path, subfolder=None) -> None:
-         super().__init__()
-
-         self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
-         logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")
-
-     def forward(self, depth_in):
-         depth_latent = self.encode(depth_in)
-         depth_out = self.decode(depth_latent)
-         return depth_out
-
-     def to(self, *args, **kwargs):
-         self.vae.to(*args, **kwargs)
-
-     @staticmethod
-     def _stack_depth_images(depth_in):
-         if 4 == len(depth_in.shape):
-             stacked = depth_in.repeat(1, 3, 1, 1)
-         elif 3 == len(depth_in.shape):
-             stacked = depth_in.unsqueeze(1)
-             stacked = stacked.repeat(1, 3, 1, 1)
-         return stacked
-
-     def encode(self, depth_in):
-         stacked = self._stack_depth_images(depth_in)
-         h = self.vae.encoder(stacked)
-         moments = self.vae.quant_conv(h)
-         mean, logvar = torch.chunk(moments, 2, dim=1)
-         depth_latent = mean
-         return depth_latent
-
-     def decode(self, depth_latent):
-         z = self.vae.post_quant_conv(depth_latent)
-         stacked = self.vae.decoder(z)
-         depth_mean = stacked.mean(dim=1, keepdim=True)
-         return depth_mean
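The point of this class is reusing an RGB-trained VAE for single-channel depth: encode tiles the depth map to 3 identical channels to match the encoder's input, and decode averages the 3 reconstructed channels back down to 1. A round-trip sketch, not part of the commit, with an assumed checkpoint name:

    import torch
    from marigold.model.stacked_depth_AE import StackedDepthAE

    ae = StackedDepthAE(pretrained_path="stabilityai/stable-diffusion-2", subfolder="vae")
    depth = torch.rand(1, 1, 512, 512) * 2.0 - 1.0
    latent = ae.encode(depth)  # [1, 4, 64, 64]
    recon = ae.decode(latent)  # [1, 1, 512, 512], mean over the 3 decoded channels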
extensions-builtin/forge_preprocessor_marigold/marigold/util/batchsize.py DELETED
@@ -1,38 +0,0 @@
- # Author: Bingxin Ke
- # Last modified: 2023-12-11
-
- import torch
- import math
-
-
- # Search table for suggested max. inference batch size
- bs_search_table = [
-     # tested on A100-PCIE-80GB
-     {"res": 768, "total_vram": 79, "bs": 35},
-     {"res": 1024, "total_vram": 79, "bs": 20},
-     # tested on A100-PCIE-40GB
-     {"res": 768, "total_vram": 39, "bs": 15},
-     {"res": 1024, "total_vram": 39, "bs": 8},
-     # tested on RTX3090, RTX4090
-     {"res": 512, "total_vram": 23, "bs": 20},
-     {"res": 768, "total_vram": 23, "bs": 7},
-     {"res": 1024, "total_vram": 23, "bs": 3},
-     # tested on GTX1080Ti
-     {"res": 512, "total_vram": 10, "bs": 5},
-     {"res": 768, "total_vram": 10, "bs": 2},
- ]
-
-
- def find_batch_size(n_repeat, input_res):
-     total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
-
-     for settings in sorted(bs_search_table, key=lambda k: (k['res'], -k['total_vram'])):
-         if input_res <= settings['res'] and total_vram >= settings['total_vram']:
-             bs = settings['bs']
-             if bs > n_repeat:
-                 bs = n_repeat
-             elif bs > math.ceil(n_repeat / 2) and bs < n_repeat:
-                 bs = math.ceil(n_repeat / 2)
-             return bs
-     return 1
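The lookup walks the table sorted by resolution (ascending) and VRAM (descending) and returns the first entry that covers the input resolution and fits the card's total memory; the suggested batch size is then capped at the ensemble size, or rounded down to half of it so the ensemble splits into two even passes. For example (figures are approximate, since total VRAM is read from torch.cuda.mem_get_info):

    # On a ~24 GB card, a 768-pixel input matches {"res": 768, "total_vram": 23, "bs": 7}.
    # With n_repeat=10, bs=7 satisfies ceil(10/2)=5 < 7 < 10, so it is trimmed to 5:
    bs = find_batch_size(n_repeat=10, input_res=768)  # -> 5, i.e. two passes of 5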
extensions-builtin/forge_preprocessor_marigold/marigold/util/ensemble.py DELETED
@@ -1,103 +0,0 @@
- # Align and merge depth images
- # Author: Bingxin Ke
- # Last modified: 2023-12-11
-
- import numpy as np
- import torch
-
- from scipy.optimize import minimize
-
-
- def inter_distances(tensors):
-     """
-     Calculate the distance between each pair of depth maps.
-     """
-     distances = []
-     for i, j in torch.combinations(torch.arange(tensors.shape[0])):
-         arr1 = tensors[i:i + 1]
-         arr2 = tensors[j:j + 1]
-         distances.append(arr1 - arr2)
-     dist = torch.cat(distances, dim=0)
-     return dist
-
-
- def ensemble_depths(input_images, regularizer_strength=0.02, max_iter=2, tol=1e-3, reduction='median', max_res=None, disp=False, device='cuda'):
-     """
-     Ensemble multiple affine-invariant depth images (defined only up to scale
-     and shift) by jointly estimating the scale and shift that align them.
-     """
-     device = input_images.device
-     original_input = input_images.clone()
-     n_img = input_images.shape[0]
-     ori_shape = input_images.shape
-
-     if max_res is not None:
-         scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
-         if scale_factor < 1:
-             downscaler = torch.nn.Upsample(scale_factor=float(scale_factor), mode='nearest')
-             input_images = downscaler(input_images.unsqueeze(1)).squeeze(1)
-
-     # initial guess
-     _min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
-     _max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
-     s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
-     t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
-     x = np.concatenate([s_init, t_init]).reshape(-1)
-
-     input_images = input_images.to(device)
-
-     # objective function
-     def closure(x):
-         x = x.astype(np.float32)
-         l = len(x)
-         s = x[:int(l / 2)]
-         t = x[int(l / 2):]
-         s = torch.from_numpy(s).to(device)
-         t = torch.from_numpy(t).to(device)
-
-         transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
-         dists = inter_distances(transformed_arrays)
-         sqrt_dist = torch.sqrt(torch.mean(dists**2))
-
-         if 'mean' == reduction:
-             pred = torch.mean(transformed_arrays, dim=0)
-         elif 'median' == reduction:
-             pred = torch.median(transformed_arrays, dim=0).values
-         else:
-             raise ValueError
-
-         near_err = torch.sqrt((0 - torch.min(pred))**2)
-         far_err = torch.sqrt((1 - torch.max(pred))**2)
-
-         err = sqrt_dist + (near_err + far_err) * regularizer_strength
-         err = err.detach().cpu().numpy()
-         return err
-
-     res = minimize(closure, x, method='BFGS', tol=tol, options={'maxiter': max_iter, 'disp': disp})
-     x = res.x
-     l = len(x)
-     s = x[:int(l / 2)]
-     t = x[int(l / 2):]
-
-     # Prediction
-     s = torch.from_numpy(s).to(device)
-     t = torch.from_numpy(t).to(device)
-     transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
-     if 'mean' == reduction:
-         aligned_images = torch.mean(transformed_arrays, dim=0)
-         std = torch.std(transformed_arrays, dim=0)
-         uncertainty = std
-     elif 'median' == reduction:
-         aligned_images = torch.median(transformed_arrays, dim=0).values
-         # MAD (median absolute deviation) as uncertainty indicator
-         abs_dev = torch.abs(transformed_arrays - aligned_images)
-         mad = torch.median(abs_dev, dim=0).values
-         uncertainty = mad
-     else:
-         raise ValueError
-
-     # Scale and shift to [0, 1]
-     _min = torch.min(aligned_images)
-     _max = torch.max(aligned_images)
-     aligned_images = (aligned_images - _min) / (_max - _min)
-     uncertainty /= (_max - _min)
-     return aligned_images, uncertainty
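Since each prediction is only defined up to an affine transform, the ensemble optimizes one scale and shift per image so that the pairwise RMS distances between the aligned maps are minimal, with a small regularizer pulling the combined map's range toward [0, 1]; the per-pixel spread (std or MAD) doubles as an uncertainty map. A toy sanity check, not part of the commit:

    import torch

    base = torch.rand(480, 640)
    preds = torch.stack([base, 0.5 * base + 0.2, 2.0 * base - 0.1])  # same map, different affine transforms
    aligned, uncertainty = ensemble_depths(preds, reduction='median')
    # aligned is rescaled to [0, 1]; uncertainty is ~0 because the inputs agree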
extensions-builtin/forge_preprocessor_marigold/marigold/util/image_util.py DELETED
@@ -1,66 +0,0 @@
- import matplotlib
- import numpy as np
- import torch
- from PIL import Image
-
-
- def colorize_depth_maps(depth_map, min_depth, max_depth, cmap='Spectral', valid_mask=None):
-     """
-     Colorize depth maps.
-     """
-     assert len(depth_map.shape) >= 2, "Invalid dimension"
-
-     if isinstance(depth_map, torch.Tensor):
-         depth = depth_map.detach().clone().squeeze().numpy()
-     elif isinstance(depth_map, np.ndarray):
-         depth = depth_map.copy().squeeze()
-     # reshape to [ (B,) H, W ]
-     if depth.ndim < 3:
-         depth = depth[np.newaxis, :, :]
-
-     # colorize
-     cm = matplotlib.colormaps[cmap]
-     depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
-     img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3]  # value from 0 to 1
-     img_colored_np = np.rollaxis(img_colored_np, 3, 1)
-
-     if valid_mask is not None:
-         if isinstance(depth_map, torch.Tensor):
-             valid_mask = valid_mask.detach().numpy()
-         valid_mask = valid_mask.squeeze()  # [H, W] or [B, H, W]
-         if valid_mask.ndim < 3:
-             valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
-         else:
-             valid_mask = valid_mask[:, np.newaxis, :, :]
-         valid_mask = np.repeat(valid_mask, 3, axis=1)
-         img_colored_np[~valid_mask] = 0
-
-     if isinstance(depth_map, torch.Tensor):
-         img_colored = torch.from_numpy(img_colored_np).float()
-     elif isinstance(depth_map, np.ndarray):
-         img_colored = img_colored_np
-
-     return img_colored
-
-
- def chw2hwc(chw):
-     assert 3 == len(chw.shape)
-     if isinstance(chw, torch.Tensor):
-         hwc = torch.permute(chw, (1, 2, 0))
-     elif isinstance(chw, np.ndarray):
-         hwc = np.moveaxis(chw, 0, -1)
-     return hwc
-
-
- def resize_max_res(img: Image.Image, max_edge_resolution):
-     original_width, original_height = img.size
-     downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
-
-     new_width = int(original_width * downscale_factor)
-     new_height = int(original_height * downscale_factor)
-
-     resized_img = img.resize((new_width, new_height))
-     return resized_img
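Typical use of these helpers is turning a raw depth array into a color-mapped preview image: colorize_depth_maps returns CHW (optionally batched) data in [0, 1], so chw2hwc plus a uint8 cast gets it into PIL's layout. A short sketch, not part of the commit:

    import numpy as np
    from PIL import Image

    depth = np.random.rand(480, 640)  # depth values already in [0, 1]
    colored = colorize_depth_maps(depth, min_depth=0.0, max_depth=1.0)  # [1, 3, H, W]
    rgb = (chw2hwc(colored[0]) * 255).astype(np.uint8)
    Image.fromarray(rgb).save("depth_colored.png")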
extensions-builtin/forge_preprocessor_marigold/marigold/util/seed_all.py DELETED
@@ -1,14 +0,0 @@
- import numpy as np
- import random
- import torch
-
-
- def seed_all(seed: int = 0):
-     """
-     Set random seeds of all components.
-     """
-     random.seed(seed)
-     np.random.seed(seed)
-     torch.manual_seed(seed)
-     torch.cuda.manual_seed_all(seed)
extensions-builtin/forge_preprocessor_marigold/scripts/preprocessor_marigold.py DELETED
@@ -1,68 +0,0 @@
- from modules_forge.supported_preprocessor import Preprocessor, PreprocessorParameter
- from modules_forge.shared import preprocessor_dir, add_supported_preprocessor
- from modules_forge.forge_util import resize_image_with_pad
-
-
- import os
- import torch
- import numpy as np
-
- from marigold.model.marigold_pipeline import MarigoldPipeline
- from huggingface_hub import snapshot_download
- from modules_forge.diffusers_patcher import DiffusersModelPatcher
- from modules_forge.forge_util import numpy_to_pytorch, HWC3
-
-
- class PreprocessorMarigold(Preprocessor):
-     def __init__(self):
-         super().__init__()
-         self.name = 'depth_marigold'
-         self.tags = ['Depth']
-         self.model_filename_filters = ['depth']
-         self.slider_resolution = PreprocessorParameter(
-             label='Resolution', minimum=128, maximum=2048, value=768, step=8, visible=True)
-         self.slider_1 = PreprocessorParameter(visible=False)
-         self.slider_2 = PreprocessorParameter(visible=False)
-         self.slider_3 = PreprocessorParameter(visible=False)
-         self.show_control_mode = True
-         self.do_not_need_model = False
-         self.sorting_priority = 100  # higher goes to top in the list
-         self.diffusers_patcher = None
-
-     def load_model(self):
-         if self.diffusers_patcher is not None:
-             return
-
-         self.diffusers_patcher = DiffusersModelPatcher(
-             pipeline_class=MarigoldPipeline,
-             pretrained_path="Bingxin/Marigold",
-             enable_xformers=False,
-             noise_scheduler_type='DDIMScheduler')
-
-         return
-
-     def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
-         input_image, remove_pad = resize_image_with_pad(input_image, resolution)
-
-         self.load_model()
-
-         H, W, C = input_image.shape
-
-         self.diffusers_patcher.prepare_memory_before_sampling(
-             batchsize=1, latent_width=W // 8, latent_height=H // 8
-         )
-
-         with torch.no_grad():
-             img = numpy_to_pytorch(input_image).movedim(-1, 1)  # HWC uint8 -> [1, C, H, W] float in [0, 1]
-             img = self.diffusers_patcher.move_tensor_to_current_device(img)
-
-             img = img * 2.0 - 1.0  # [0, 1] -> [-1, 1], the range the VAE encoder expects
-             depth = self.diffusers_patcher.pipeline(img, num_inference_steps=20, show_pbar=False)
-             depth = 0.5 - depth * 0.5  # [-1, 1] -> [1, 0]: invert so that near is bright
-             depth = depth.movedim(1, -1)[0].cpu().numpy()
-             depth_image = HWC3((depth * 255.0).clip(0, 255).astype(np.uint8))
-
-         return remove_pad(depth_image)
-
-
- add_supported_preprocessor(PreprocessorMarigold())
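The output mapping depth = 0.5 - depth * 0.5 takes the decoder's roughly [-1, 1] signal to [0, 1] while inverting it, so near surfaces come out bright, matching the convention ControlNet depth models expect. A minimal invocation sketch, not part of the commit, assuming a running Forge environment that provides the modules_forge package:

    import numpy as np

    proc = PreprocessorMarigold()
    image = (np.random.rand(768, 768, 3) * 255).astype(np.uint8)  # dummy HWC uint8 input
    depth_map = proc(image, resolution=768)  # HWC uint8 depth map, near = bright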