SlekLi committed
Commit e6ab4cc · verified · 1 Parent(s): 8052936

Upload tran_3dsr.py

Files changed (1)
  1. tran_3dsr.py +1112 -0
tran_3dsr.py ADDED
@@ -0,0 +1,1112 @@
+ #
+ # Copyright (C) 2023, Inria
+ # GRAPHDECO research group, https://team.inria.fr/graphdeco
+ # All rights reserved.
+ #
+ # This software is free for non-commercial, research and evaluation use
+ # under the terms of the LICENSE.md file.
+ #
+ # For inquiries contact george.drettakis@inria.fr
+ #
+
+ import os, glob
+ import numpy as np
+ import open3d as o3d
+ import cv2
+ import json
+ import torch
+ import random
+ from random import randint
+ from utils.loss_utils import l1_loss, ssim
+ from gaussian_renderer import render, network_gui
+ from torch import autocast
+ import sys
+ import copy
+ from scene import Scene, GaussianModel
+ from utils.general_utils import safe_state
+ import uuid
+ import lpips
+ import pyiqa
+ import natsort
+ # from tqdm import tqdm
+ from utils.image_utils import psnr
+ from argparse import ArgumentParser, Namespace
+ from arguments import ModelParams, PipelineParams, OptimizationParams
+ # from scipy.spatial.transform import Rotation as R, Slerp
+ import torchvision
+ from scene.cameras import Camera
+ from PIL import Image
+ from utils.general_utils import PILtoTorch
+ try:
+     # from torch.utils.tensorboard import SummaryWriter
+     from tensorboardX import SummaryWriter
+     TENSORBOARD_FOUND = True
+ except ImportError:
+     TENSORBOARD_FOUND = False
+
+ ##### Stable SR usage #####
+ from pytorch_lightning import seed_everything
+ from omegaconf import OmegaConf
+ from utils.stable_sr_utils import instantiate_from_config
+ from utils.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization
+ from contextlib import nullcontext
+ from tqdm import tqdm, trange
+ from einops import rearrange, repeat
+ from utils.util_image import ImageSpliterTh
+ import torch.nn.functional as F
+ from pathlib import Path
+ import time
+
+ @torch.no_grad()
+ def create_offset_gt(image, offset):
+     height, width = image.shape[1:]
+     meshgrid = np.meshgrid(range(width), range(height), indexing='xy')
+     id_coords = np.stack(meshgrid, axis=0).astype(np.float32)
+     id_coords = torch.from_numpy(id_coords).cuda()
+
+     id_coords = id_coords.permute(1, 2, 0) + offset
+     id_coords[..., 0] /= (width - 1)
+     id_coords[..., 1] /= (height - 1)
+     id_coords = id_coords * 2 - 1
+
+     image = torch.nn.functional.grid_sample(image[None], id_coords[None], align_corners=True, padding_mode="border")[0]
+     return image
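+
+ # Minimal usage sketch (illustrative only, not called during training): with a
+ # zero offset the normalized grid falls exactly on the original pixel centers,
+ # so grid_sample with align_corners=True returns the input unchanged:
+ #   img = torch.rand(3, 64, 64, device="cuda")
+ #   same = create_offset_gt(img, torch.zeros(64, 64, 2, device="cuda"))
+ #   assert torch.allclose(img, same, atol=1e-5)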
+
+ def prepare_training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, args, dataset2=None):
+     first_iter = 0
+     tb_writer = prepare_output_and_logger(dataset)
+     gaussians = GaussianModel(dataset.sh_degree)
+
+     if args.load_pretrain:
+         scene = Scene(dataset, gaussians, load_iteration=30000, shuffle=False)
+         scene.model_path = args.output_folder
+         dataset_name = os.path.basename(dataset.source_path)
+         dataset.model_path = os.path.join(args.output_folder, dataset_name)
+
+         tb_writer = prepare_output_and_logger(dataset)
+         scene.model_path = dataset.model_path
+     else:
+         scene = Scene(dataset, gaussians)
+
+     if args.load_pretrain:
+         gaussians.max_radii2D = torch.zeros((gaussians.get_xyz.shape[0]), dtype=torch.float32, device="cuda")
+         gaussians.training_setup(opt)
+         print("--- after loading pretrain points:", gaussians._xyz.shape[0])
+     else:
+         gaussians.training_setup(opt)
+
+     if checkpoint:
+         (model_params, first_iter) = torch.load(checkpoint)
+         gaussians.restore(model_params, opt)
+
+     bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
+     background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
+
+     out_dict = {"scene": scene, "gaussians": gaussians, "tb_writer": tb_writer}
+     return out_dict
+
+ def training_with_iters(in_dict, dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, args, dataset2=None, SR_iter=0):
+     scene = in_dict['scene']
+     gaussians = in_dict['gaussians']
+     tb_writer = in_dict['tb_writer']
+
+     first_iter = 0
+     bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
+     background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
+
+     iter_start = torch.cuda.Event(enable_timing = True)
+     iter_end = torch.cuda.Event(enable_timing = True)
+
+     trainCameras = scene.getTrainCameras().copy()
+     testCameras = scene.getTestCameras().copy()
+     allCameras = trainCameras + testCameras
+
+     # highresolution index
+     highresolution_index = []
+     for index, camera in enumerate(trainCameras):
+         if camera.image_width >= 800:
+             highresolution_index.append(index)
+
+     gaussians.compute_3D_filter(cameras=trainCameras)
+
+     viewpoint_stack = None
+     ema_loss_for_log = 0.0
+     progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
+     first_iter += 1
+
+     for iteration in range(first_iter, opt.iterations + 1):
+         if network_gui.conn == None:
+             network_gui.try_connect()
+         while network_gui.conn != None:
+             try:
+                 net_image_bytes = None
+                 custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
+                 if custom_cam != None:
+                     net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"]
+                     net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
+                 network_gui.send(net_image_bytes, dataset.source_path)
+                 if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
+                     break
+             except Exception as e:
+                 network_gui.conn = None
+
+         iter_start.record()
+
+         gaussians.update_learning_rate(iteration)
+
+         # Every 1000 iterations we increase the levels of SH up to a maximum degree
+         if iteration % 1000 == 0:
+             gaussians.oneupSHdegree()
+
+         # Pick a random Camera
+         if not viewpoint_stack:
+             viewpoint_stack = scene.getTrainCameras().copy()
+         viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))
+
+         # Pick a random high resolution camera
+         if random.random() < 0.3 and dataset.sample_more_highres:
+             viewpoint_cam = trainCameras[highresolution_index[randint(0, len(highresolution_index)-1)]]
+
+         # Render
+         if (iteration - 1) == debug_from:
+             pipe.debug = True
+
+         # TODO: ignore border pixels
+         if dataset.ray_jitter:
+             subpixel_offset = torch.rand((int(viewpoint_cam.image_height), int(viewpoint_cam.image_width), 2), dtype=torch.float32, device="cuda") - 0.5
+             # subpixel_offset *= 0.0
+         else:
+             subpixel_offset = None
+
+         # Rendering
+         render_pkg = render(viewpoint_cam, gaussians, pipe, background, kernel_size=dataset.kernel_size, subpixel_offset=subpixel_offset)
+         image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
+
+         # Loss
+         gt_image = viewpoint_cam.original_image.cuda()
+
+         # sample gt_image with subpixel offset
+         if dataset.resample_gt_image:
+             gt_image = create_offset_gt(gt_image, subpixel_offset)
+
+         Ll1 = l1_loss(image, gt_image)
+         loss_hr = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
+         loss = loss_hr
+
+         if iteration > opt.iterations - len(trainCameras):
+             training_folder = os.path.join(args.output_folder, 'training_views')
+             if not os.path.exists(training_folder):
+                 os.makedirs(training_folder)
+             file_name = os.path.join(training_folder, viewpoint_cam.image_name + ".png")
+             torchvision.utils.save_image(image, file_name)
+
+         if args.fidelity_train_en:
+             # LR-fidelity loss: downsample the rendered view 4x and match it against the low-resolution input view
+             lr_resolution = dataset.resolution * 4
+             gt_path = os.path.join(dataset.source_path, f'images_{lr_resolution}', viewpoint_cam.image_name+'.png')
+             image_gt_lr = Image.open(gt_path)
+             w_lr, h_lr = image_gt_lr.size
+             image_gt_lr = PILtoTorch(image_gt_lr, (w_lr, h_lr)).cuda()
+             image_lr = torch.nn.functional.interpolate(image.unsqueeze(0), scale_factor=0.25, mode='bicubic', antialias=True).squeeze(0)
+             loss_lr = (1.0 - opt.lambda_dssim) * l1_loss(image_lr, image_gt_lr) + opt.lambda_dssim * (1.0 - ssim(image_lr, image_gt_lr))
+             loss += loss_lr * args.wt_lr
+
+         loss.backward()
+         iter_end.record()
+
+         if iteration == opt.iterations - 1:
+             training_folder = os.path.join(args.outdir, 'train_results')
+             if not os.path.exists(training_folder):
+                 os.makedirs(training_folder)
+
+             for i in range(len(trainCameras)):
+                 cam = trainCameras[i]
+                 rendering = render(cam, gaussians, pipe, background, kernel_size=dataset.kernel_size, subpixel_offset=subpixel_offset)["render"]
+                 file_name = os.path.join(training_folder, cam.image_name + f"_step_{3-SR_iter}.png")
+                 print(file_name)
+                 torchvision.utils.save_image(rendering, file_name)
+
+         with torch.no_grad():
+             # Progress bar
+             ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
+             if iteration % 10 == 0:
+                 progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.7f}"})
+                 progress_bar.update(10)
+             if iteration == opt.iterations:
+                 progress_bar.close()
+
+             # Log and save
+             training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background, dataset.kernel_size))
+             if (iteration in saving_iterations):
+                 final_iter = (3-SR_iter) * opt.iterations + iteration
+                 print("\n[ITER {}] Saving Gaussians".format(iteration))
+                 scene.save(final_iter)
+
+             # Densification
+             if iteration < opt.densify_until_iter:
+                 # Keep track of max radii in image-space for pruning
+                 gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
+                 gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
+
+                 if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
+                     size_threshold = 20 if iteration > opt.opacity_reset_interval else None
+                     gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
+                     gaussians.compute_3D_filter(cameras=trainCameras)
+
+                 if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
+                     gaussians.reset_opacity()
+
+             if iteration % 100 == 0 and iteration > opt.densify_until_iter:
+                 if iteration < opt.iterations - 100:
+                     # don't update at the end of training
+                     gaussians.compute_3D_filter(cameras=trainCameras)
+
+             # Optimizer step
+             if iteration < opt.iterations:
+                 gaussians.optimizer.step()
+                 gaussians.optimizer.zero_grad(set_to_none = True)
+
+             if (iteration in checkpoint_iterations):
+                 print("\n[ITER {}] Saving Checkpoint".format(iteration))
+                 torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")
+
+     out_dict = {"scene": scene, "gaussians": gaussians, "tb_writer": tb_writer, "highresolution_index": highresolution_index}
+
+     return out_dict
+
+ def load_model_from_config(config, ckpt, verbose=False):
+     print(f"Loading model from {ckpt}")
+     pl_sd = torch.load(ckpt, map_location="cpu")
+     if "global_step" in pl_sd:
+         print(f"Global Step: {pl_sd['global_step']}")
+     sd = pl_sd["state_dict"]
+     model = instantiate_from_config(config.model)
+     m, u = model.load_state_dict(sd, strict=False)
+     if len(m) > 0 and verbose:
+         print("missing keys:")
+         print(m)
+     if len(u) > 0 and verbose:
+         print("unexpected keys:")
+         print(u)
+
+     model.cuda()
+     model.eval()
+     return model
+
+ def prepare_model(opt):
+     config = OmegaConf.load(f"{opt.config}")
+
+     local_clip_path = "/home/shulei/3D-SR-AR/others/3DSR/open_clip_pytorch_model.bin"
+
+     print(f"Trying to redirect the CLIP path to a local copy: {local_clip_path}")
+
+     # Try to override the CLIP path in the Stable Diffusion config.
+     # A standard SD config file is usually structured as follows:
+     try:
+         if hasattr(config.model.params, 'cond_stage_config'):
+             if hasattr(config.model.params.cond_stage_config, 'params'):
+                 # Override the original "openai/clip-vit-large-patch14"
+                 config.model.params.cond_stage_config.params.version = local_clip_path
+                 print(">>> Successfully replaced the CLIP path in the config with the local path!")
+     except Exception as e:
+         print(f">>> Warning while overriding the CLIP path (ignore if your model does not use CLIP): {e}")
+
+     model = load_model_from_config(config, f"{opt.ckpt}")
+     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+     model = model.to(device)
+     model.configs = config
+
+     vqgan_config = OmegaConf.load("configs/autoencoder/autoencoder_kl_64x64x4_resi.yaml")
+     vq_model = load_model_from_config(vqgan_config, opt.vqgan_ckpt)
+     vq_model = vq_model.to(device)
+     vq_model.decoder.fusion_w = opt.dec_w
+
+     model.register_schedule(given_betas=None, beta_schedule="linear", timesteps=1000,
+                             linear_start=0.00085, linear_end=0.0120, cosine_s=8e-3)
+
+     out_dict = {'model': model, 'vq_model': vq_model}
+     return out_dict
+
+ def space_timesteps(num_timesteps, section_counts):
+     """
+     Create a list of timesteps to use from an original diffusion process,
+     given the number of timesteps we want to take from equally-sized portions
+     of the original process.
+     For example, if there are 300 timesteps and the section counts are [10,15,20]
+     then the first 100 timesteps are strided to be 10 timesteps, the second 100
+     are strided to be 15 timesteps, and the final 100 are strided to be 20.
+     If the stride is a string starting with "ddim", then the fixed striding
+     from the DDIM paper is used, and only one section is allowed.
+     :param num_timesteps: the number of diffusion steps in the original
+                           process to divide up.
+     :param section_counts: either a list of numbers, or a string containing
+                            comma-separated numbers, indicating the step count
+                            per section. As a special case, use "ddimN" where N
+                            is a number of steps to use the striding from the
+                            DDIM paper.
+     :return: a set of diffusion steps from the original process to use.
+     """
+     if isinstance(section_counts, str):
+         if section_counts.startswith("ddim"):
+             desired_count = int(section_counts[len("ddim"):])
+             for i in range(1, num_timesteps):
+                 if len(range(0, num_timesteps, i)) == desired_count:
+                     return set(range(0, num_timesteps, i))
+             raise ValueError(
+                 f"cannot create exactly {desired_count} steps with an integer stride"
+             )
+         section_counts = [int(x) for x in section_counts.split(",")]  # e.g. [250,]
+     size_per = num_timesteps // len(section_counts)
+     extra = num_timesteps % len(section_counts)
+     start_idx = 0
+     all_steps = []
+     for i, section_count in enumerate(section_counts):
+         size = size_per + (1 if i < extra else 0)
+         if size < section_count:
+             raise ValueError(
+                 f"cannot divide section of {size} steps into {section_count}"
+             )
+         if section_count <= 1:
+             frac_stride = 1
+         else:
+             frac_stride = (size - 1) / (section_count - 1)
+         cur_idx = 0.0
+         taken_steps = []
+         for _ in range(section_count):
+             taken_steps.append(start_idx + round(cur_idx))
+             cur_idx += frac_stride
+         all_steps += taken_steps
+         start_idx += size
+     return set(all_steps)
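+
+ # Illustrative values, traced by hand against the logic above:
+ #   space_timesteps(1000, [4])      -> {0, 333, 666, 999}  (one section, even fractional stride)
+ #   space_timesteps(1000, "ddim10") -> {0, 100, 200, ..., 900}  (fixed DDIM striding)
+ # This script calls space_timesteps(1000, [args.ddpm_steps]) to respace the
+ # 1000-step schedule down to a handful of denoising steps.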
+
+ def read_image(im_path):
+     # HWC uint8 in [0, 255] -> 1 x C x H x W float tensor in [-1, 1] on the GPU
+     im = np.array(Image.open(im_path).convert("RGB"))
+     im = im.astype(np.float32)/255.0
+     im = im[None].transpose(0,3,1,2)
+     im = (torch.from_numpy(im) - 0.5) / 0.5
+     return im.cuda()
+
+ def visualize_image(latent, rgb_patch, model_dict, out_img_name=None):
+     # latent: latent to be decoded
+     # rgb_patch: input image rgb patch
+     # model_dict: dictionary containing model and vq_model
+     # out_img_name: output image name
+
+     vq_model = model_dict['vq_model']
+     model = model_dict['model']
+     _, enc_fea_lq = vq_model.encode(rgb_patch)
+     x_samples = vq_model.decode(latent * 1. / model.scale_factor, enc_fea_lq)
+     x_samples = wavelet_reconstruction(x_samples, rgb_patch)
+     im_sr = torch.clamp((x_samples+1.0)/2.0, min=0.0, max=1.0)
+     out = Image.fromarray(np.uint8(im_sr[0, ].permute(1,2,0).cpu().numpy()*255))
+
+     if out_img_name is not None:
+         out.save(out_img_name)
+     return out
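+
+ # Debugging sketch (hypothetical file name): decode an intermediate latent next to
+ # the LR patch it conditions on, e.g.
+ #   visualize_image(x0_head, im_lq_pch, out_dict, out_img_name="debug_x0.png")
+ # Pass the raw diffusion-space latent: the function divides by model.scale_factor itself.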
+
+ def train_proposed(dataset, op, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, args, dataset2=None):
+     """Alternate StableSR denoising with 3DGS training: for each of the
+     args.ddpm_steps diffusion steps, super-resolve the training views, write
+     them to args.outdir, swap them in as ground truth, and fine-tune the
+     Gaussians via training_with_iters."""
+     ####################################
+     # Set up for Stable SR
+     ####################################
+     print('>>>>>>>>>>color correction>>>>>>>>>>>')
+     if args.colorfix_type == 'adain':
+         print('Use adain color correction')
+     elif args.colorfix_type == 'wavelet':
+         print('Use wavelet color correction')
+     else:
+         print('No color correction')
+     print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+
+     #############################################
+     # load StableSR model and scheduler
+     #############################################
+     # Check input images
+     os.makedirs(args.outdir, exist_ok=True)
+     outpath = args.outdir
+     batch_size = args.n_samples
+     images_path_ori = sorted(glob.glob(os.path.join(args.init_img, "*")))
+     images_path = np.array(copy.deepcopy(images_path_ori))
+
+     # Only taking training views for SR
+     llffhold = 8
+     all_indices = np.arange(len(images_path))
+     train_indices = all_indices % llffhold != 0
+     sr_indices = all_indices[train_indices]
+     images_path = images_path[sr_indices[:]]
+     print(f"Found {len(images_path)} inputs.")
+
+     # Prepare model
+     out_dict = prepare_model(args)
+     model = out_dict['model']
+     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+     sqrt_alphas_cumprod = copy.deepcopy(model.sqrt_alphas_cumprod)
+     sqrt_one_minus_alphas_cumprod = copy.deepcopy(model.sqrt_one_minus_alphas_cumprod)
+
+     # Modify scheduler for fewer steps
+     use_timesteps = set(space_timesteps(1000, [args.ddpm_steps]))
+     last_alpha_cumprod = 1.0
+     new_betas = []
+     timestep_map = []
+     for i, alpha_cumprod in enumerate(model.alphas_cumprod):
+         if i in use_timesteps:
+             new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
+             last_alpha_cumprod = alpha_cumprod
+             timestep_map.append(i)
+     new_betas = [beta.data.cpu().numpy() for beta in new_betas]
+     model.register_schedule(given_betas=np.array(new_betas), timesteps=len(new_betas))
+     model.num_timesteps = 1000
+     model.ori_timesteps = list(use_timesteps)
+     model.ori_timesteps.sort()
+     model = model.to(device)
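+
+     # Respacing recap (comment-only sketch): for each kept index i with
+     # abar_i = alphas_cumprod[i], the new beta is chosen so that the shortened
+     # chain reproduces the same marginals, i.e. new_beta_i = 1 - abar_i / abar_prev.
+     # For any full beta schedule the same rule reads, in plain NumPy:
+     #   abar = np.cumprod(1.0 - betas)                 # betas: the full 1000-step schedule
+     #   prev = 1.0
+     #   for i in sorted(space_timesteps(1000, [4])):   # e.g. 4 respaced steps
+     #       new_beta = 1.0 - abar[i] / prev; prev = abar[i]
+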
+     # Add model and args to out_dict
+     out_dict['model'] = model
+     out_dict['args'] = args
+     precision_scope = autocast if args.precision == "autocast" else nullcontext
+
+     #############################################
+     # Loading scene and Gaussians
+     #############################################
+     op.densify_until_iter = args.densify_end
+     input_dict = prepare_training(dataset, op, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, args, dataset2)
+     scene = input_dict["scene"]
+     trainCameras = scene.getTrainCameras()
+
+     if 'llff' in dataset.source_path:
+         dir_name = dataset.source_path
+         lr_resolution = dataset.resolution * 4
+
+         orig_folder = os.path.join(dir_name, 'images')
+         orig_files = os.listdir(orig_folder)
+         orig_files = natsort.natsorted(orig_files)
+
+         cur_files = os.listdir(os.path.join(dir_name, f'images_{lr_resolution}'))
+         cur_files = natsort.natsorted(cur_files)
+     #############################################
+     # Prepare for SR method
+     #############################################
+     with model.ema_scope():
+         tic = time.time()
+         all_samples = list()
+         seed_everything(args.seed)
+
+         imgs_per_batch = batch_size
+         loop_img_time = len(images_path) // imgs_per_batch
+         one_more_time = (len(images_path) % imgs_per_batch) > 0
+         loop_img_time += int(one_more_time)
+
+         #############################################
+         # Loop by denoising steps
+         #############################################
+         for iteration in range(args.ddpm_steps-1, -1, -1):
+             model.cuda()
+             out_dict['vq_model'].cuda()
+             for loop_id in range(loop_img_time):
+                 if loop_id == loop_img_time - 1:
+                     images_path_small = images_path[loop_id*imgs_per_batch:]
+                 else:
+                     images_path_small = images_path[loop_id*imgs_per_batch : (loop_id+1)*imgs_per_batch]
+
+                 im_lq_bs = []
+                 im_path_bs = []
+                 for img_id in range(len(images_path_small)):
+                     cur_image = read_image(images_path_small[img_id])
+                     size_min = min(cur_image.size(-1), cur_image.size(-2))
+                     upsample_scale = max(args.input_size/size_min, args.upscale)
+                     cur_image = F.interpolate(
+                         cur_image,
+                         size=(int(cur_image.size(-2)*upsample_scale),
+                               int(cur_image.size(-1)*upsample_scale)),
+                         mode='bicubic',
+                     )
+                     cur_image = cur_image.clamp(-1, 1)
+                     im_lq_bs.append(cur_image)  # 1 x c x h x w, [-1, 1]
+                     im_path_bs.append(images_path_small[img_id])  # path of the LR input
+                 im_lq_bs = torch.cat(im_lq_bs, dim=0)
+                 ori_h, ori_w = im_lq_bs.shape[2:]
+                 ref_patch = None
+                 if not (ori_h % 32 == 0 and ori_w % 32 == 0):
+                     flag_pad = True
+                     pad_h = ((ori_h // 32) + 1) * 32 - ori_h
+                     pad_w = ((ori_w // 32) + 1) * 32 - ori_w
+                     im_lq_bs = F.pad(im_lq_bs, pad=(0, pad_w, 0, pad_h), mode='reflect')
+                 else:
+                     flag_pad = False
+
+                 if iteration != args.ddpm_steps - 1:
+                     #####################################################
+                     # Load upsampled image, and encode to latent space
+                     #####################################################
+                     imgs = []
+                     for img_id in range(len(im_path_bs)):
+                         img_name = str(Path(im_path_bs[img_id]).name)
+                         basename = os.path.splitext(os.path.basename(img_name))[0]
+                         training_folder = os.path.join(args.outdir, 'train_results')
+                         cur_id = loop_id * imgs_per_batch + img_id
+                         imgpath = os.path.join(training_folder, trainCameras[cur_id].image_name + f"_step_{3-int(iteration)-1}.png")
+                         cur_image = read_image(imgpath)
+
+                         # Add padding to loaded image
+                         if not (ori_h % 32 == 0 and ori_w % 32 == 0):
+                             pad_h = ((ori_h // 32) + 1) * 32 - ori_h
+                             pad_w = ((ori_w // 32) + 1) * 32 - ori_w
+                             cur_image = F.pad(cur_image, pad=(0, pad_w, 0, pad_h), mode='reflect')
+                         imgs.append(cur_image)
+                     imgs = torch.cat(imgs, dim=0)
+
+                 print("************** Diffusion step ", 3-iteration, "**************")
+                 with torch.no_grad():
+                     with precision_scope("cuda"):
+                         #############################################
+                         # Start of loop for denoised images
+                         #############################################
+                         for img_id in range(len(im_path_bs)):
+                             #############################################
+                             # Split image to patches
+                             #############################################
+                             if im_lq_bs.shape[2] > args.vqgantile_size or im_lq_bs.shape[3] > args.vqgantile_size:
+                                 im_spliter = ImageSpliterTh(im_lq_bs[img_id].unsqueeze(0), args.vqgantile_size, args.vqgantile_stride, sf=1)
+                                 if iteration != args.ddpm_steps-1:
+                                     im_spliter_x_tilda = ImageSpliterTh(imgs[img_id].unsqueeze(0), args.vqgantile_size, args.vqgantile_stride, sf=1)
+                                 #############################################
+                                 # Loop to process each patch in an image
+                                 #############################################
+                                 for im_lq_pch, index_infos in im_spliter:
+                                     if iteration == args.ddpm_steps-1:
+                                         init_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_pch))  # move to latent space
+                                         text_init = ['']*args.n_samples
+                                         semantic_c = model.cond_stage_model(text_init)
+                                         noise = torch.randn_like(init_latent)
+                                         # To start from an intermediate step instead, add noise to the LR latent up to that step.
+                                         t = repeat(torch.tensor([999]), '1 -> b', b=im_lq_pch.size(0))
+                                         t = t.to(device).long()
+                                         # Create x_T by noising the latent: x_T = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise
+                                         x_T = model.q_sample_respace(x_start=init_latent, t=t, sqrt_alphas_cumprod=sqrt_alphas_cumprod,
+                                                                      sqrt_one_minus_alphas_cumprod=sqrt_one_minus_alphas_cumprod, noise=noise)
+                                         _, x0_head = model.sample_canvas_one_iter(iteration=iteration, cond=semantic_c, struct_cond=init_latent,
+                                                                                   batch_size=im_lq_pch.size(0), timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                                   x_T=x_T, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                                   batch_size_sample=args.n_samples, return_x0=True)
+                                     else:
+                                         #############################################
+                                         # Encode image to latent space
+                                         #############################################
+                                         im_lq_pch_tilda, index_infos_tilda = next(im_spliter_x_tilda)
+                                         x0_tilda_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_pch_tilda))  # move to latent space
+                                         text_init = ['']*args.n_samples
+                                         semantic_c = model.cond_stage_model(text_init)
+                                         init_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_pch))  # move to latent space
+                                         x_T_1 = model.sample_canvas_one_iter(iteration=iteration+1, cond=semantic_c, struct_cond=init_latent,
+                                                                              batch_size=im_lq_pch.size(0), timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                              x_T=x_T, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                              batch_size_sample=args.n_samples, return_x0=False, x0_input=x0_tilda_latent)
+                                         _, x0_head = model.sample_canvas_one_iter(iteration=iteration, cond=semantic_c, struct_cond=init_latent,
+                                                                                   batch_size=im_lq_pch.size(0), timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                                   x_T=x_T_1, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                                   batch_size_sample=args.n_samples, return_x0=True)
+                                     # Decode the latent space to image space
+                                     vq_model = out_dict['vq_model']
+                                     _, enc_fea_lq = vq_model.encode(im_lq_pch)
+                                     x_samples = vq_model.decode(x0_head * 1. / model.scale_factor, enc_fea_lq)
+
+                                     if args.colorfix_type == 'adain':
+                                         x_samples = adaptive_instance_normalization(x_samples, im_lq_pch)
+                                     elif args.colorfix_type == 'wavelet':
+                                         x_samples = wavelet_reconstruction(x_samples, im_lq_pch)
+                                     im_spliter.update_gaussian(x_samples, index_infos)
+
+                                 im_sr = im_spliter.gather()
+                                 im_sr = torch.clamp((im_sr+1.0)/2.0, min=0.0, max=1.0)
+
+                                 if upsample_scale > args.upscale:
+                                     im_sr = F.interpolate(
+                                         im_sr,
+                                         size=(int(im_lq_bs.size(-2)*args.upscale/upsample_scale),
+                                               int(im_lq_bs.size(-1)*args.upscale/upsample_scale)),
+                                         mode='bicubic',)
+                                     im_sr = torch.clamp(im_sr, min=0.0, max=1.0)
+
+                                 if flag_pad:
+                                     im_sr = im_sr[:, :, :ori_h, :ori_w]
+
+                                 im_sr = im_sr.cpu().numpy().transpose(0,2,3,1)*255  # b x h x w x c
+                                 img_name = str(Path(im_path_bs[img_id]).name)
+                                 basename = os.path.splitext(os.path.basename(img_name))[0]
+                                 outpath = str(Path(args.outdir)) + '/' + basename + f'_step_{3-int(iteration)}.png'
+                                 print('Finished:', outpath)
+                                 Image.fromarray(im_sr[0, ].astype(np.uint8)).save(outpath)
+
+                             #######################################################################
+                             # Take the entire image as SR input (when input image is small enough)
+                             #######################################################################
+                             else:
+                                 if iteration == args.ddpm_steps-1:
+                                     init_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_bs[img_id].unsqueeze(0)))  # move to latent space
+                                     text_init = ['']*args.n_samples
+                                     semantic_c = model.cond_stage_model(text_init)
+                                     noise = torch.randn_like(init_latent)
+                                     # To start from an intermediate step instead, add noise to the LR latent up to that step.
+                                     t = repeat(torch.tensor([999]), '1 -> b', b=1)
+                                     t = t.to(device).long()
+                                     x_T = model.q_sample_respace(x_start=init_latent, t=t, sqrt_alphas_cumprod=sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod=sqrt_one_minus_alphas_cumprod, noise=noise)
+                                     _, x0_head = model.sample_canvas_one_iter(iteration=iteration, cond=semantic_c, struct_cond=init_latent,
+                                                                               batch_size=1, timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                               x_T=x_T, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                               batch_size_sample=args.n_samples, return_x0=True)
+                                 else:
+                                     #############################################
+                                     # Encode image to latent space
+                                     #############################################
+                                     x0_tilda_latent = model.get_first_stage_encoding(model.encode_first_stage(imgs[img_id].unsqueeze(0)))  # move to latent space
+                                     text_init = ['']*args.n_samples
+                                     semantic_c = model.cond_stage_model(text_init)
+                                     init_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_bs[img_id].unsqueeze(0)))  # move to latent space
+                                     # Get x_{t-1}
+                                     x_T_1 = model.sample_canvas_one_iter(iteration=iteration+1, cond=semantic_c, struct_cond=init_latent,
+                                                                          batch_size=1, timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                          x_T=x_T, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                          batch_size_sample=args.n_samples, return_x0=False, x0_input=x0_tilda_latent)
+                                     # Predict x0_head
+                                     _, x0_head = model.sample_canvas_one_iter(iteration=iteration, cond=semantic_c, struct_cond=init_latent,
+                                                                               batch_size=1, timesteps=args.ddpm_steps, time_replace=args.ddpm_steps,
+                                                                               x_T=x_T_1, tile_size=int(args.input_size/8), tile_overlap=args.tile_overlap,
+                                                                               batch_size_sample=args.n_samples, return_x0=True)
+
+                                 vq_model = out_dict['vq_model']
+                                 _, enc_fea_lq = vq_model.encode(im_lq_bs[img_id].unsqueeze(0))
+                                 x_samples = vq_model.decode(x0_head * 1. / model.scale_factor, enc_fea_lq)
+                                 if args.colorfix_type == 'adain':
+                                     x_samples = adaptive_instance_normalization(x_samples, im_lq_bs[img_id].unsqueeze(0))
+                                 elif args.colorfix_type == 'wavelet':
+                                     x_samples = wavelet_reconstruction(x_samples, im_lq_bs[img_id].unsqueeze(0))
+                                 im_sr = torch.clamp((x_samples+1.0)/2.0, min=0.0, max=1.0)
+                                 if flag_pad:
+                                     im_sr = im_sr[:, :, :ori_h, :ori_w]
+
+                                 im_sr = im_sr.cpu().numpy().transpose(0,2,3,1)*255  # b x h x w x c
+                                 img_name = str(Path(im_path_bs[img_id]).name)
+                                 basename = os.path.splitext(os.path.basename(img_name))[0]
+                                 outpath = str(Path(args.outdir)) + '/' + basename + f'_step_{3-int(iteration)}.png'
+                                 Image.fromarray(im_sr[0, ].astype(np.uint8)).save(outpath)
+                                 print('Finished:', outpath)
+
+                             if iteration == 0:
+                                 final_sr_path = os.path.join(args.outdir, 'final_sr_results')
+                                 os.makedirs(final_sr_path, exist_ok=True)
+                                 outpath = final_sr_path + '/' + basename + '.png'
+                                 Image.fromarray(im_sr[0, ].astype(np.uint8)).save(outpath)
+             #############################################
+             # End of loop for denoised images
+             #############################################
+             print("Moving SD model to CPU to save VRAM for 3DGS...")
+             model.cpu()
+             out_dict['vq_model'].cpu()
+             torch.cuda.empty_cache()
+             #############################################
+             # Update ground truth image in trainCameras
+             #############################################
+             for img_id in range(len(trainCameras)):
+                 # If you read from the saved image, you can use the following code
+                 # cam_id = loop_id * imgs_per_batch + img_id
+
+                 # if 'llff' in dataset.source_path:
+                 #     matching_index = next((i for i, name in enumerate(orig_files) if trainCameras[img_id].image_name in name), None)
+                 #     img_name = cur_files[matching_index].split('.')[0]
+                 img_name = trainCameras[img_id].image_name
+                 img_path = str(Path(args.outdir)) + '/' + img_name + f'_step_{3-int(iteration)}.png'
+                 img_transfer = Image.open(img_path).convert("RGB")
+                 width, height = img_transfer.size
+                 loaded_image = PILtoTorch(img_transfer, (width, height)).cuda()
+                 # print(img_path)
+                 # torchvision.utils.save_image(loaded_image, 'vis.png')
+                 # torchvision.utils.save_image(trainCameras[img_id].original_image, 'vis_2.png')
+                 trainCameras[img_id].original_image = loaded_image.clone()
+
+             # #############################################
+             # # Train GS
+             # #############################################
+             input_dict = training_with_iters(input_dict, dataset, op, pipe, testing_iterations, saving_iterations,
+                                              checkpoint_iterations, checkpoint, debug_from, args, dataset2, SR_iter=iteration,)
+
+ def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from, args, dataset2=None):
+     first_iter = 0
+     tb_writer = prepare_output_and_logger(dataset)
+     gaussians = GaussianModel(dataset.sh_degree)
+     scene = Scene(dataset, gaussians)
+     gaussians.training_setup(opt)
+     if checkpoint:
+         (model_params, first_iter) = torch.load(checkpoint)
+         gaussians.restore(model_params, opt)
+         print(" ----- checkpoint loaded from", checkpoint)
+
+     bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
+     background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
+     iter_start = torch.cuda.Event(enable_timing = True)
+     iter_end = torch.cuda.Event(enable_timing = True)
+
+     trainCameras = scene.getTrainCameras().copy()
+     testCameras = scene.getTestCameras().copy()
+     allCameras = trainCameras + testCameras
+
+     # highresolution index
+     highresolution_index = []
+     for index, camera in enumerate(trainCameras):
+         if camera.image_width >= 800:
+             highresolution_index.append(index)
+
+     gaussians.compute_3D_filter(cameras=trainCameras)
+
+     viewpoint_stack = None
+     ema_loss_for_log = 0.0
+     progress_bar = tqdm(range(first_iter, opt.iterations), desc="Training progress")
+
+     first_iter += 1
+
+     num_points = {}
+
+     for iteration in range(first_iter, opt.iterations + 1):
+         if network_gui.conn == None:
+             network_gui.try_connect()
+         while network_gui.conn != None:
+             try:
+                 net_image_bytes = None
+                 custom_cam, do_training, pipe.convert_SHs_python, pipe.compute_cov3D_python, keep_alive, scaling_modifer = network_gui.receive()
+                 if custom_cam != None:
+                     net_image = render(custom_cam, gaussians, pipe, background, scaling_modifer)["render"]
+                     net_image_bytes = memoryview((torch.clamp(net_image, min=0, max=1.0) * 255).byte().permute(1, 2, 0).contiguous().cpu().numpy())
+                 network_gui.send(net_image_bytes, dataset.source_path)
+                 if do_training and ((iteration < int(opt.iterations)) or not keep_alive):
+                     break
+             except Exception as e:
+                 network_gui.conn = None
+
+         iter_start.record()
+
+         gaussians.update_learning_rate(iteration)
+
+         # Every 1000 iterations we increase the levels of SH up to a maximum degree
+         if iteration % 1000 == 0:
+             gaussians.oneupSHdegree()
+
+         # Pick a random Camera
+         if not viewpoint_stack:
+             viewpoint_stack = scene.getTrainCameras().copy()
+         pop_id = randint(0, len(viewpoint_stack)-1)
+         viewpoint_cam = viewpoint_stack.pop(pop_id)
+
+         if random.random() < 0.3 and dataset.sample_more_highres:
+             viewpoint_cam = trainCameras[highresolution_index[randint(0, len(highresolution_index)-1)]]
+
+         # Render
+         if (iteration - 1) == debug_from:
+             pipe.debug = True
+
+         # TODO: ignore border pixels
+         if dataset.ray_jitter:
+             subpixel_offset = torch.rand((int(viewpoint_cam.image_height), int(viewpoint_cam.image_width), 2), dtype=torch.float32, device="cuda") - 0.5
+             # subpixel_offset *= 0.0
+         else:
+             subpixel_offset = None
+         render_pkg = render(viewpoint_cam, gaussians, pipe, background, kernel_size=dataset.kernel_size, subpixel_offset=subpixel_offset)
+         image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
+
+         # Loss
+         gt_image = viewpoint_cam.original_image.cuda()
+         # sample gt_image with subpixel offset
+         if dataset.resample_gt_image:
+             gt_image = create_offset_gt(gt_image, subpixel_offset)
+         Ll1 = l1_loss(image, gt_image)
+         loss = (1.0 - opt.lambda_dssim) * Ll1 + opt.lambda_dssim * (1.0 - ssim(image, gt_image))
+         loss.backward()
+         iter_end.record()
+
+         with torch.no_grad():
+             # Progress bar
+             ema_loss_for_log = 0.4 * loss.item() + 0.6 * ema_loss_for_log
+             if iteration % 10 == 0:
+                 progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.7f}"})
+                 progress_bar.update(10)
+             if iteration == opt.iterations:
+                 progress_bar.close()
+
+             # Log and save
+             training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, (pipe, background, dataset.kernel_size))
+             if (iteration in saving_iterations):
+                 print("\n[ITER {}] Saving Gaussians".format(iteration))
+                 scene.save(iteration)
+             if (iteration == opt.iterations):
+                 print("\n[ITER {}] Saving Gaussians".format(iteration))
+                 scene.save(iteration)
+             if iteration % 1000 == 0:
+                 print("\n[ITER {}] Saving Gaussians".format(iteration))
+                 scene.save(iteration, output_folder="iteration_29000")
+
+             if not args.freeze_point:
+                 # Densification
+                 if iteration < opt.densify_until_iter:
+                     # Keep track of max radii in image-space for pruning
+                     gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
+                     gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
+
+                     if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
+                         size_threshold = 20 if iteration > opt.opacity_reset_interval else None
+                         gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
+                         gaussians.compute_3D_filter(cameras=trainCameras)
+
+                     if iteration % opt.opacity_reset_interval == 0 or (dataset.white_background and iteration == opt.densify_from_iter):
+                         gaussians.reset_opacity()
+
+                 if iteration % 100 == 0 and iteration > opt.densify_until_iter:
+                     if iteration < opt.iterations - 100:
+                         gaussians.compute_3D_filter(cameras=trainCameras)
+
+             if iteration % 500 == 0:
+                 num_points[iteration] = gaussians.get_xyz.shape[0]
+                 print("number of points:", gaussians._xyz.shape[0])
+
+             if iteration == opt.iterations:
+                 with open(os.path.join(args.output_folder, "num_points.json"), "w") as f:
+                     json.dump(num_points, f)
+
+             # Optimizer step
+             if iteration < opt.iterations:
+                 gaussians.optimizer.step()
+                 gaussians.optimizer.zero_grad(set_to_none = True)
+
+             if (iteration in checkpoint_iterations):
+                 print("\n[ITER {}] Saving Checkpoint".format(iteration))
+                 torch.save((gaussians.capture(), iteration), scene.model_path + "/chkpnt" + str(iteration) + ".pth")
+
+ def prepare_output_and_logger(args):
+     if not args.model_path:
+         if os.getenv('OAR_JOB_ID'):
+             unique_str = os.getenv('OAR_JOB_ID')
+         else:
+             unique_str = str(uuid.uuid4())
+         args.model_path = os.path.join("./output/", unique_str[0:10])
+
+     # Set up output folder
+     print("Output folder: {}".format(args.model_path))
+     os.makedirs(args.model_path, exist_ok = True)
+     with open(os.path.join(args.model_path, "cfg_args"), 'w') as cfg_log_f:
+         cfg_log_f.write(str(Namespace(**vars(args))))
+
+     # Create Tensorboard writer
+     tb_writer = None
+     if TENSORBOARD_FOUND:
+         tb_writer = SummaryWriter(args.model_path)
+     else:
+         print("Tensorboard not available: not logging progress")
+     return tb_writer
+
+ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs):
+     if tb_writer:
+         tb_writer.add_scalar('train_loss_patches/l1_loss', Ll1.item(), iteration)
+         tb_writer.add_scalar('train_loss_patches/total_loss', loss.item(), iteration)
+         tb_writer.add_scalar('iter_time', elapsed, iteration)
+
+     # Report test and samples of training set
+     if iteration in testing_iterations:
+         torch.cuda.empty_cache()
+         validation_configs = ({'name': 'test', 'cameras' : scene.getTestCameras()},
+                               {'name': 'train', 'cameras' : [scene.getTrainCameras()[idx % len(scene.getTrainCameras())] for idx in range(5, 30, 5)]})
+
+         for config in validation_configs:
+             if config['cameras'] and len(config['cameras']) > 0:
+                 l1_test = 0.0
+                 psnr_test = 0.0
+                 for idx, viewpoint in enumerate(config['cameras']):
+                     image = torch.clamp(renderFunc(viewpoint, scene.gaussians, *renderArgs)["render"], 0.0, 1.0)
+                     gt_image = torch.clamp(viewpoint.original_image.to("cuda"), 0.0, 1.0)
+                     if tb_writer and (idx < 5):
+                         tb_writer.add_images(config['name'] + "_view_{}/render".format(viewpoint.image_name), image[None], global_step=iteration)
+                         if iteration == testing_iterations[0]:
+                             tb_writer.add_images(config['name'] + "_view_{}/ground_truth".format(viewpoint.image_name), gt_image[None], global_step=iteration)
+                     l1_test += l1_loss(image, gt_image).mean().double()
+                     psnr_test += psnr(image, gt_image).mean().double()
+                 psnr_test /= len(config['cameras'])
+                 l1_test /= len(config['cameras'])
+                 print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
+                 if tb_writer:
+                     tb_writer.add_scalar(config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
+                     tb_writer.add_scalar(config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
+
+         if tb_writer:
+             try:
+                 tb_writer.add_histogram("scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
+             except:
+                 pass
+             tb_writer.add_scalar('total_points', scene.gaussians.get_xyz.shape[0], iteration)
+         torch.cuda.empty_cache()
+
+ def parse_args():
+     parser = ArgumentParser(description="Training script parameters")
+     lp = ModelParams(parser)
+     op = OptimizationParams(parser)
+     pp = PipelineParams(parser)
+     parser.add_argument('--ip', type=str, default="127.0.0.1")
+     parser.add_argument('--port', type=int, default=6009)
+     parser.add_argument('--debug_from', type=int, default=-1)
+     parser.add_argument('--detect_anomaly', action='store_true', default=False)
+     parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000])
+     parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000])
+     parser.add_argument("--quiet", action="store_true")
+     parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
+     parser.add_argument("--start_checkpoint", type=str, default=None)
+     parser.add_argument("--output_folder", type=str)
+     parser.add_argument("--load_pretrain", action="store_true")
+     parser.add_argument("--freeze_point", action="store_true")
+     parser.add_argument("--SR_GS", action="store_true")
+     parser.add_argument("--fidelity_train_en", action="store_true")
+     parser.add_argument("--prune_init_en", action="store_true")
+     parser.add_argument("--seed", type=int, default=999)
+     parser.add_argument("--edge_aware_loss_en", action="store_true")
+     parser.add_argument("--lpips_wt", type=float, default=0.2)
+     parser.add_argument("--wt_lr", type=float, default=0.4)
+     parser.add_argument("--densify_end", type=int, default=15000)
+     parser.add_argument("--original", action="store_true")
+     #############################################
+     #### From Stable SR code ####
+     #############################################
+     parser.add_argument(
+         "--init-img",
+         type=str,
+         nargs="?",
+         help="path to the input image",
+         default="inputs/user_upload"
+     )
+     parser.add_argument(
+         "--outdir",
+         type=str,
+         nargs="?",
+         help="dir to write results to",
+         default="outputs/user_upload"
+     )
+     parser.add_argument(
+         "--ddpm_steps",
+         type=int,
+         default=1000,
+         help="number of ddpm sampling steps",
+     )
+     parser.add_argument(
+         "--n_iter",
+         type=int,
+         default=1,
+         help="sample this often",
+     )
+     parser.add_argument(
+         "--C",
+         type=int,
+         default=4,
+         help="latent channels",
+     )
+     parser.add_argument(
+         "--f",
+         type=int,
+         default=8,
+         help="downsampling factor, most often 8 or 16",
+     )
+     parser.add_argument(
+         "--n_samples",
+         type=int,
+         default=1,
+         help="how many samples to produce for each given prompt. A.k.a batch size",
+     )
+     parser.add_argument(
+         "--config",
+         type=str,
+         default="configs/stable-diffusion/v1-inference.yaml",
+         help="path to config which constructs model",
+     )
+     parser.add_argument(
+         "--ckpt",
+         type=str,
+         default="./stablesr_000117.ckpt",
+         help="path to checkpoint of model",
+     )
+     parser.add_argument(
+         "--vqgan_ckpt",
+         type=str,
+         default="./vqgan_cfw_00011.ckpt",
+         help="path to checkpoint of VQGAN model",
+     )
+     parser.add_argument(
+         "--precision",
+         type=str,
+         help="evaluate at this precision",
+         choices=["full", "autocast"],
+         default="autocast"
+     )
+     parser.add_argument(
+         "--dec_w",
+         type=float,
+         default=0.5,
+         help="weight for combining VQGAN and Diffusion",
+     )
+     parser.add_argument(
+         "--tile_overlap",
+         type=int,
+         default=32,
+         help="tile overlap size (in latent)",
+     )
+     parser.add_argument(
+         "--upscale",
+         type=float,
+         default=4.0,
+         help="upsample scale",
+     )
+     parser.add_argument(
+         "--colorfix_type",
+         type=str,
+         default="nofix",
+         help="Color fix type to adjust the color of HR result according to LR input: adain (used in paper); wavelet; nofix",
+     )
+     parser.add_argument(
+         "--vqgantile_stride",
+         type=int,
+         default=1000,
+         help="the stride for tile operation before VQGAN decoder (in pixel)",
+     )
+     parser.add_argument(
+         "--vqgantile_size",
+         type=int,
+         default=1280,
+         help="the size for tile operation before VQGAN decoder (in pixel)",
+     )
+     parser.add_argument(
+         "--input_size",
+         type=int,
+         default=512,
+         help="input size",
+     )
+
+     args = parser.parse_args(sys.argv[1:])
+     args.save_iterations.append(args.iterations)
+
+     return lp, op, pp, args
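+
+ # Example invocation (paths are placeholders; -s/--source_path and --resolution
+ # are assumed to come from the standard 3DGS ModelParams added above):
+ #   python tran_3dsr.py -s <scene_dir> --output_folder <gs_out> --outdir <sr_out> \
+ #       --init-img <lr_images_dir> --ddpm_steps 4 --config <stablesr_config.yaml> \
+ #       --ckpt ./stablesr_000117.ckpt --vqgan_ckpt ./vqgan_cfw_00011.ckpt \
+ #       --colorfix_type wavelet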
+
+ if __name__ == "__main__":
+     lp, op, pp, args = parse_args()
+     print("Optimizing " + args.model_path)
+     # Set up random seed
+     torch.manual_seed(args.seed)
+     random.seed(args.seed)
+     np.random.seed(args.seed)
+     torch.backends.cudnn.benchmark = False
+     torch.backends.cudnn.deterministic = True
+     seed_everything(args.seed)
+
+     # Initialize system state (RNG)
+     safe_state(args.quiet)
+
+     # Start GUI server, configure and run training
+     network_gui.init(args.ip, args.port)
+     torch.autograd.set_detect_anomaly(args.detect_anomaly)
+
+     if args.original:
+         training(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from, args)
+     else:
+         train_proposed(lp.extract(args), op.extract(args), pp.extract(args), args.test_iterations, args.save_iterations, args.checkpoint_iterations, args.start_checkpoint, args.debug_from, args)
+     # All done
+     print("\nTraining complete.")