vaskers5 committed (verified)
Commit d14ee21 · Parent(s): caf79e8

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+competitors_inference_code/DemoFusion/demo.ipynb filter=lfs diff=lfs merge=lfs -text
competitors_inference_code/DemoFusion/demo.ipynb ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6bbe553656b3d9c863a261a053722930b3b538d5b6b05eac66ff9ae83eaf976
+size 17016845
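The three lines above are a standard Git LFS pointer (version, object ID, size), following the spec URL they reference; the notebook itself is stored out-of-band. As a quick illustration, a minimal, hypothetical parser for such a pointer file might look like this:

```python
# Minimal sketch: parse a Git LFS pointer file into a dict.
# Assumes the three-line "key value" layout shown in the diff above.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # e.g. {'version': 'https://...', 'oid': 'sha256:...', 'size': '17016845'}
    return fields
```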
competitors_inference_code/DemoFusion/figures/gradio_demo.png ADDED

Git LFS Details

  • SHA256: 705de2ac8f94dc7e361323d930c9bb669a99d7015caa13e9919201abd7b9bddf
  • Pointer size: 132 Bytes
  • Size of remote file: 1.03 MB
competitors_inference_code/DemoFusion/figures/gradio_demo_controlnet.png ADDED

Git LFS Details

  • SHA256: 4775087b4a33289b001ed4fbc12a98727a70499c25ca624ca5db004821945473
  • Pointer size: 132 Bytes
  • Size of remote file: 3.81 MB
competitors_inference_code/DemoFusion/figures/gradio_demo_controlnet_img2img.png ADDED

Git LFS Details

  • SHA256: 839f11104086fa21ab80109f2bc22d6cd7919b9df5a93f80af4c1463e94385b0
  • Pointer size: 132 Bytes
  • Size of remote file: 4.44 MB
competitors_inference_code/DemoFusion/figures/gradio_demo_img2img.png ADDED

Git LFS Details

  • SHA256: 1a60e3bfdede9a8b80855e893b528737a4a854c0ddc17be0444a2d6bd75b0599
  • Pointer size: 132 Bytes
  • Size of remote file: 5.41 MB
competitors_inference_code/DemoFusion/figures/illustration.jpg ADDED

Git LFS Details

  • SHA256: 404e03ddf2e01e4112967ce7276db193f4aa66d069076fd1dbf49b0067ca1d16
  • Pointer size: 131 Bytes
  • Size of remote file: 851 kB
competitors_inference_code/DemoFusion/figures/progressive_process.jpg ADDED

Git LFS Details

  • SHA256: 14888eb1f7c01fef168e43f1be3aeaeb44bc30c2ff4ee8d394f9eac802a95f7c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.91 MB
competitors_inference_code/DemoFusion/output_example.png ADDED

Git LFS Details

  • SHA256: f07ec95e64c728eaf49df80921d0740fed21e07a71aa7be37307b8d94c210aaa
  • Pointer size: 133 Bytes
  • Size of remote file: 10.5 MB
competitors_inference_code/LSRNA/figures/comparison.jpg ADDED

Git LFS Details

  • SHA256: 54cf3fe5d652c6eae2a10b43c186b89c9553e12bf0ca103dccd613fae70d86d9
  • Pointer size: 131 Bytes
  • Size of remote file: 191 kB
competitors_inference_code/LSRNA/figures/teaser.jpg ADDED

Git LFS Details

  • SHA256: b35ee21f41dba5c97fadaf8b2aceb0a6bd0d6b1b84afc9525dffc1e9ca039a2f
  • Pointer size: 131 Bytes
  • Size of remote file: 193 kB
competitors_inference_code/LSRNA/lsr/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (291 Bytes).
 
competitors_inference_code/LSRNA/lsr/swinir-liif-latent-sdxl.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0996bf5ea25178efb48537213a09283a890bb17cf88389106814df3e56480747
+size 19779045
competitors_inference_code/LSRNA/lsr_training/README.md ADDED
@@ -0,0 +1,9 @@
+## LSR Training
+This directory contains the training code for LSR.
+To prepare the training dataset, refer to the appendix and the code at `datasets/scripts/make_trainset.py`.
+The training process can be run with the script `dist.sh`.
+
+> **Note:**
+> The training code is not fully refined and may run into issues depending on your environment.
+> For instance, it is currently only compatible with PyTorch 1.x.
+> Should you run into any problems during training, please open an issue or send an email.
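Given the PyTorch 1.x constraint noted in the README, a small guard at the top of the training entry point can fail fast on unsupported versions. A minimal sketch (not part of the repo):

```python
# Minimal sketch: fail fast if PyTorch is not a 1.x release,
# matching the compatibility note in the README above.
import torch

major = int(torch.__version__.split(".")[0])
if major != 1:
    raise RuntimeError(
        f"This training code targets PyTorch 1.x; found {torch.__version__}"
    )
```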
competitors_inference_code/LSRNA/lsr_training/core.py ADDED
@@ -0,0 +1,412 @@
+#https://github.com/sanghyun-son/bicubic_pytorch
+import math
+import typing
+
+import torch
+from torch.nn import functional as F
+
+__all__ = ['imresize']
+
+_I = typing.Optional[int]
+_D = typing.Optional[torch.dtype]
+
+def nearest_contribution(x: torch.Tensor) -> torch.Tensor:
+    range_around_0 = torch.logical_and(x.gt(-0.5), x.le(0.5))
+    cont = range_around_0.to(dtype=x.dtype)
+    return cont
+
+def linear_contribution(x: torch.Tensor) -> torch.Tensor:
+    ax = x.abs()
+    range_01 = ax.le(1)
+    cont = (1 - ax) * range_01.to(dtype=x.dtype)
+    return cont
+
+def cubic_contribution(x: torch.Tensor, a: float=-0.5) -> torch.Tensor:
+    ax = x.abs()
+    ax2 = ax * ax
+    ax3 = ax * ax2
+
+    range_01 = ax.le(1)
+    range_12 = torch.logical_and(ax.gt(1), ax.le(2))
+
+    cont_01 = (a + 2) * ax3 - (a + 3) * ax2 + 1
+    cont_01 = cont_01 * range_01.to(dtype=x.dtype)
+
+    cont_12 = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a)
+    cont_12 = cont_12 * range_12.to(dtype=x.dtype)
+
+    cont = cont_01 + cont_12
+    return cont
+
+def gaussian_contribution(x: torch.Tensor, sigma: float=2.0) -> torch.Tensor:
+    range_3sigma = (x.abs() <= 3 * sigma + 1)
+    # Normalization will be done after
+    cont = torch.exp(-x.pow(2) / (2 * sigma**2))
+    cont = cont * range_3sigma.to(dtype=x.dtype)
+    return cont
+
+def discrete_kernel(
+        kernel: str, scale: float, antialiasing: bool=True) -> torch.Tensor:
+
+    '''
+    For downsampling with integer scale only.
+    '''
+    downsampling_factor = int(1 / scale)
+    if kernel == 'cubic':
+        kernel_size_orig = 4
+    else:
+        raise ValueError('{} kernel is not supported!'.format(kernel))
+
+    if antialiasing:
+        kernel_size = kernel_size_orig * downsampling_factor
+    else:
+        kernel_size = kernel_size_orig
+
+    if downsampling_factor % 2 == 0:
+        a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size))
+    else:
+        kernel_size -= 1
+        a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1))
+
+    with torch.no_grad():
+        r = torch.linspace(-a, a, steps=kernel_size)
+        k = cubic_contribution(r).view(-1, 1)
+        k = torch.matmul(k, k.t())
+        k /= k.sum()
+
+    return k
+
+def reflect_padding(
+        x: torch.Tensor,
+        dim: int,
+        pad_pre: int,
+        pad_post: int) -> torch.Tensor:
+
+    '''
+    Apply reflect padding to the given Tensor.
+    Note that it is slightly different from the PyTorch functional.pad,
+    where boundary elements are used only once.
+    Instead, we follow the MATLAB implementation
+    which uses boundary elements twice.
+
+    For example,
+    [a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
+    while our implementation yields [a, a, b, c, d, d].
+    '''
+    b, c, h, w = x.size()
+    if dim == 2 or dim == -2:
+        padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
+        padding_buffer[..., pad_pre:(h + pad_pre), :].copy_(x)
+        for p in range(pad_pre):
+            padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
+        for p in range(pad_post):
+            padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
+    else:
+        padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
+        padding_buffer[..., pad_pre:(w + pad_pre)].copy_(x)
+        for p in range(pad_pre):
+            padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
+        for p in range(pad_post):
+            padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
+
+    return padding_buffer
+
+def padding(
+        x: torch.Tensor,
+        dim: int,
+        pad_pre: int,
+        pad_post: int,
+        padding_type: typing.Optional[str]='reflect') -> torch.Tensor:
+
+    if padding_type is None:
+        return x
+    elif padding_type == 'reflect':
+        x_pad = reflect_padding(x, dim, pad_pre, pad_post)
+    else:
+        raise ValueError('{} padding is not supported!'.format(padding_type))
+
+    return x_pad
+
+def get_padding(
+        base: torch.Tensor,
+        kernel_size: int,
+        x_size: int) -> typing.Tuple[int, int, torch.Tensor]:
+
+    base = base.long()
+    r_min = base.min()
+    r_max = base.max() + kernel_size - 1
+
+    if r_min <= 0:
+        pad_pre = -r_min
+        pad_pre = pad_pre.item()
+        base += pad_pre
+    else:
+        pad_pre = 0
+
+    if r_max >= x_size:
+        pad_post = r_max - x_size + 1
+        pad_post = pad_post.item()
+    else:
+        pad_post = 0
+
+    return pad_pre, pad_post, base
+
+def get_weight(
+        dist: torch.Tensor,
+        kernel_size: int,
+        kernel: str='cubic',
+        sigma: float=2.0,
+        antialiasing_factor: float=1) -> torch.Tensor:
+
+    buffer_pos = dist.new_zeros(kernel_size, len(dist))
+    for idx, buffer_sub in enumerate(buffer_pos):
+        buffer_sub.copy_(dist - idx)
+
+    # Expand (downsampling) / Shrink (upsampling) the receptive field.
+    buffer_pos *= antialiasing_factor
+    if kernel == 'cubic':
+        weight = cubic_contribution(buffer_pos)
+    elif kernel == 'gaussian':
+        weight = gaussian_contribution(buffer_pos, sigma=sigma)
+    else:
+        raise ValueError('{} kernel is not supported!'.format(kernel))
+
+    weight /= weight.sum(dim=0, keepdim=True)
+    return weight
+
+def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor:
+    # Resize height
+    if dim == 2 or dim == -2:
+        k = (kernel_size, 1)
+        h_out = x.size(-2) - kernel_size + 1
+        w_out = x.size(-1)
+    # Resize width
+    else:
+        k = (1, kernel_size)
+        h_out = x.size(-2)
+        w_out = x.size(-1) - kernel_size + 1
+
+    unfold = F.unfold(x, k)
+    unfold = unfold.view(unfold.size(0), -1, h_out, w_out)
+    return unfold
+
+def reshape_input(
+        x: torch.Tensor) -> typing.Tuple[torch.Tensor, _I, _I, _I, _I]:
+
+    if x.dim() == 4:
+        b, c, h, w = x.size()
+    elif x.dim() == 3:
+        c, h, w = x.size()
+        b = None
+    elif x.dim() == 2:
+        h, w = x.size()
+        b = c = None
+    else:
+        raise ValueError('{}-dim Tensor is not supported!'.format(x.dim()))
+
+    x = x.view(-1, 1, h, w)
+    return x, b, c, h, w
+
+def reshape_output(
+        x: torch.Tensor, b: _I, c: _I) -> torch.Tensor:
+
+    rh = x.size(-2)
+    rw = x.size(-1)
+    # Back to the original dimension
+    if b is not None:
+        x = x.view(b, c, rh, rw)  # 4-dim
+    else:
+        if c is not None:
+            x = x.view(c, rh, rw)  # 3-dim
+        else:
+            x = x.view(rh, rw)  # 2-dim
+
+    return x
+
+def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _D]:
+    if x.dtype != torch.float32 and x.dtype != torch.float64:
+        dtype = x.dtype
+        x = x.float()
+    else:
+        dtype = None
+
+    return x, dtype
+
+def cast_output(x: torch.Tensor, dtype: _D) -> torch.Tensor:
+    if dtype is not None:
+        if not dtype.is_floating_point:
+            x = x.round()
+        # To prevent over/underflow when converting types
+        if dtype is torch.uint8:
+            x = x.clamp(0, 255)
+
+        x = x.to(dtype=dtype)
+
+    return x
+
+def resize_1d(
+        x: torch.Tensor,
+        dim: int,
+        size: typing.Optional[int],
+        scale: typing.Optional[float],
+        kernel: str='cubic',
+        sigma: float=2.0,
+        padding_type: str='reflect',
+        antialiasing: bool=True) -> torch.Tensor:
+
+    '''
+    Args:
+        x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W).
+        dim (int): The dimension to resize (-2 for height, -1 for width).
+        scale (float): Resizing scale along the given dimension.
+        size (int): Output size along the given dimension.
+
+    Return:
+        torch.Tensor: The Tensor resized along the given dimension.
+    '''
+    # Identity case
+    if scale == 1:
+        return x
+
+    # Default bicubic kernel with antialiasing (only when downsampling)
+    if kernel == 'cubic':
+        kernel_size = 4
+    else:
+        kernel_size = math.floor(6 * sigma)
+
+    if antialiasing and (scale < 1):
+        antialiasing_factor = scale
+        kernel_size = math.ceil(kernel_size / antialiasing_factor)
+    else:
+        antialiasing_factor = 1
+
+    # We allow margin to both sides
+    kernel_size += 2
+
+    # Weights only depend on the shape of input and output,
+    # so we do not calculate gradients here.
+    with torch.no_grad():
+        pos = torch.linspace(
+            0, size - 1, steps=size, dtype=x.dtype, device=x.device,
+        )
+        pos = (pos + 0.5) / scale - 0.5
+        base = pos.floor() - (kernel_size // 2) + 1
+        dist = pos - base
+        weight = get_weight(
+            dist,
+            kernel_size,
+            kernel=kernel,
+            sigma=sigma,
+            antialiasing_factor=antialiasing_factor,
+        )
+        pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
+
+    # To backpropagate through x
+    x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
+    unfold = reshape_tensor(x_pad, dim, kernel_size)
+    # Subsampling first
+    if dim == 2 or dim == -2:
+        sample = unfold[..., base, :]
+        weight = weight.view(1, kernel_size, sample.size(2), 1)
+    else:
+        sample = unfold[..., base]
+        weight = weight.view(1, kernel_size, 1, sample.size(3))
+
+    # Apply the kernel
+    x = sample * weight
+    x = x.sum(dim=1, keepdim=True)
+    return x
+
+def downsampling_2d(
+        x: torch.Tensor,
+        k: torch.Tensor,
+        scale: int,
+        padding_type: str='reflect') -> torch.Tensor:
+
+    c = x.size(1)
+    k_h = k.size(-2)
+    k_w = k.size(-1)
+
+    k = k.to(dtype=x.dtype, device=x.device)
+    k = k.view(1, 1, k_h, k_w)
+    k = k.repeat(c, c, 1, 1)
+    e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False)
+    e = e.view(c, c, 1, 1)
+    k = k * e
+
+    pad_h = (k_h - scale) // 2
+    pad_w = (k_w - scale) // 2
+    x = padding(x, -2, pad_h, pad_h, padding_type=padding_type)
+    x = padding(x, -1, pad_w, pad_w, padding_type=padding_type)
+    y = F.conv2d(x, k, padding=0, stride=scale)
+    return y
+
+def imresize(
+        x: torch.Tensor,
+        scale: typing.Optional[float]=None,
+        sizes: typing.Optional[typing.Tuple[int, int]]=None,
+        kernel: typing.Union[str, torch.Tensor]='cubic',
+        sigma: float=2,
+        rotation_degree: float=0,
+        padding_type: str='reflect',
+        antialiasing: bool=True) -> torch.Tensor:
+
+    '''
+    Args:
+        x (torch.Tensor): An input image of shape (B, C, H, W), (C, H, W), or (H, W).
+        scale (float): Resizing scale; exactly one of scale and sizes must be given.
+        sizes (tuple(int, int)): Output (height, width).
+        kernel (str, default='cubic'): 'cubic' or 'gaussian', or a predefined kernel Tensor.
+        sigma (float, default=2): Bandwidth of the Gaussian kernel.
+        rotation_degree (float, default=0): (currently unused)
+        padding_type (str, default='reflect'): Boundary handling; only 'reflect' is supported.
+        antialiasing (bool, default=True): Apply antialiasing when downsampling.
+
+    Return:
+        torch.Tensor: A resized image, in the same layout as the input.
+    '''
+
+    if scale is None and sizes is None:
+        raise ValueError('One of scale or sizes must be specified!')
+    if scale is not None and sizes is not None:
+        raise ValueError('Please specify scale or sizes to avoid conflict!')
+
+    x, b, c, h, w = reshape_input(x)
+
+    if sizes is None:
+        '''
+        # Check if we can apply the convolution algorithm
+        scale_inv = 1 / scale
+        if isinstance(kernel, str) and scale_inv.is_integer():
+            kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing)
+        elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer():
+            raise ValueError(
+                'An integer downsampling factor '
+                'should be used with a predefined kernel!'
+            )
+        '''
+        # Determine output size
+        sizes = (math.ceil(h * scale), math.ceil(w * scale))
+        scales = (scale, scale)
+
+    if scale is None:
+        scales = (sizes[0] / h, sizes[1] / w)
+
+    x, dtype = cast_input(x)
+
+    if isinstance(kernel, str):
+        # Shared keyword arguments across dimensions
+        kwargs = {
+            'kernel': kernel,
+            'sigma': sigma,
+            'padding_type': padding_type,
+            'antialiasing': antialiasing,
+        }
+        # Core resizing module
+        x = resize_1d(x, -2, size=sizes[0], scale=scales[0], **kwargs)
+        x = resize_1d(x, -1, size=sizes[1], scale=scales[1], **kwargs)
+    elif isinstance(kernel, torch.Tensor):
+        x = downsampling_2d(x, kernel, scale=int(1 / scale))
+
+    x = reshape_output(x, b, c)
+    x = cast_output(x, dtype)
+    return x
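`imresize` mirrors MATLAB-style bicubic resizing, separably along height then width. A quick usage sketch (assuming `core.py` is on the import path):

```python
import torch
from core import imresize  # assumes lsr_training/core.py is importable as `core`

x = torch.rand(1, 3, 64, 64)      # (B, C, H, W) image batch
y = imresize(x, scale=0.5)        # antialiased bicubic downsample -> (1, 3, 32, 32)
z = imresize(x, sizes=(128, 96))  # resize to an explicit (height, width)
```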
competitors_inference_code/LSRNA/lsr_training/datasets/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .datasets import register, make
+from . import image_folder
+from . import wrappers
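`register`/`make` follow the name-to-constructor registry pattern common in LIIF-style codebases. The repo's `datasets/datasets.py` is not part of this commit; a minimal sketch of that pattern, for orientation only:

```python
# Illustrative sketch of a name -> constructor registry; the actual
# implementation lives in datasets/datasets.py, which is not shown here.
datasets = {}

def register(name):
    def decorator(cls):
        datasets[name] = cls
        return cls
    return decorator

def make(spec, args=None):
    # spec: {'name': ..., 'args': {...}}; extra args override the spec.
    dataset_args = dict(spec.get('args', {}), **(args or {}))
    return datasets[spec['name']](**dataset_args)
```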
competitors_inference_code/LSRNA/lsr_training/datasets/image_folder.py ADDED
@@ -0,0 +1,35 @@
+import os
+from PIL import Image
+
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+from torchvision import transforms
+from datasets import register
+from utils.utils_io import *
+
+
+@register('image-folder')
+class ImageFolder(Dataset):
+
+    def __init__(self, hr_path, lr_path, first_k=None, repeat=1, scales=[2,3,4]):
+        self.repeat = repeat
+        self.files = sorted(os.listdir(hr_path))
+        if first_k is not None:
+            self.files = self.files[:first_k]
+        self.hr_path = hr_path
+        self.lr_path = lr_path
+        self.scales = scales
+
+    def __len__(self):
+        return len(self.files) * self.repeat
+
+    def __getitem__(self, idx):
+        filename = self.files[idx % len(self.files)]
+        hr_path = os.path.join(self.hr_path, filename)
+
+        lr_paths = []
+        for scale in self.scales:
+            lr_path = os.path.join(self.lr_path, f'X{scale}', filename)
+            lr_paths.append(lr_path)
+        return hr_path, lr_paths
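Note that `__getitem__` returns file paths, not decoded tensors; loading is deferred to the wrapper dataset registered in `wrappers.py`. Given the `X{scale}` subfolder convention above, instantiation looks like this (hypothetical directory layout):

```python
# Hypothetical layout: data/hr/0001.png, data/lr/X2/0001.png, data/lr/X3/0001.png, ...
ds = ImageFolder(hr_path='data/hr', lr_path='data/lr', scales=[2, 3, 4])
hr_file, lr_files = ds[0]  # paths only; decoding happens in the wrapper dataset
```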
competitors_inference_code/LSRNA/lsr_training/train.py ADDED
@@ -0,0 +1,238 @@
+import warnings
+warnings.filterwarnings("ignore")
+import os, sys
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+from functools import partial
+import argparse
+import yaml
+import builtins
+
+from PIL import Image
+from utils import *
+import datasets
+import models
+from tqdm import tqdm
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from torch.utils.data.distributed import DistributedSampler
+from diffusers import StableDiffusionXLPipeline
+
+
+def prepare_training(config, log):
+    resume_path = config['resume_path']
+    resume = os.path.exists(resume_path)
+
+    if resume:
+        sv_file = torch.load(resume_path, map_location=config['map_loc'])
+        iter_start = sv_file['iter'] + 1
+        if iter_start <= config['iter_max'] // 100:
+            resume = False
+        else:
+            log('Model resumed from: {} (prev_iter: {})'.format(resume_path, sv_file['iter']))
+            model = models.make(sv_file['model'], load_sd=True).cuda()
+            optimizer, lr_scheduler = make_optim_sched(model.parameters(),
+                sv_file['optimizer'], sv_file['lr_scheduler'], load_sd=True)
+
+    if not resume:
+        if config.get('init_path'):
+            log('Model init from: {}'.format(config['init_path']))
+            sv_file = torch.load(config['init_path'], map_location=config['map_loc'])
+            model = models.make(sv_file['model'], load_sd=True).cuda()
+        else:
+            log('Loading new model ...')
+            model = models.make(config['model']).cuda()
+        optimizer, lr_scheduler = make_optim_sched(model.parameters(),
+            config['optimizer'], config['lr_scheduler'])
+        iter_start = 1
+    log('#params={}'.format(compute_num_params(model, text=True)))
+
+    # load vae
+    sd_ckpt = config['sd_ckpt']
+    pipeline = StableDiffusionXLPipeline.from_pretrained(sd_ckpt)
+    pipeline.enable_vae_tiling()
+    vae = pipeline.vae.cuda()  # eval mode, float32, i/o range [-1,1]
+    return model, optimizer, lr_scheduler, iter_start, vae
+
+
+def make_train_loader(config):
+    spec = config['train_dataset']
+    seed = 0 if not config['seed'] else config['seed']
+    dataset = datasets.make(spec['dataset'])
+    dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
+
+    assert spec['batch_size'] % config['world_size'] == 0
+    batch_size = spec['batch_size'] // config['world_size']
+    assert spec['num_workers'] % config['world_size'] == 0
+    num_workers = spec['num_workers'] // config['world_size']
+
+    sampler = DistributedSampler(dataset, shuffle=True, seed=seed)
+    data_loader = DataLoader(dataset, batch_size=batch_size, drop_last=True,
+        shuffle=False, pin_memory=True, num_workers=num_workers, sampler=sampler)
+    return data_loader, sampler
+
+
+def valid(model, config, vae):
+    valid_path = config['valid_path']
+    valid_data_name = config['valid_path'].split('/')[-2]
+    scale = 2  # fixed
+    model.eval()
+
+    filenames = sorted(os.listdir(valid_path))
+    for filename in tqdm(filenames, leave=True, desc=f'valid (x{scale})'):
+        hr_file = os.path.join(valid_path, filename)
+        hr = np.array(Image.open(hr_file).convert('RGB')) / 255.
+        hr = torch.from_numpy(hr).permute(2,0,1).float().unsqueeze(0).cuda()
+
+        with torch.no_grad():
+            # crop to divisible size
+            H, W = hr.shape[-2:]
+            H, W = H//8*8, W//8*8
+            hr = hr[..., :H, :W]
+            hr = (hr - 0.5) * 2  # normalize to [-1,1]
+
+            hr_latent = vae.encode(hr).latent_dist.mode() * vae.config.scaling_factor
+            H, W = hr_latent.shape[-2:]
+            H, W = H*scale, W*scale
+
+            coord = make_coord((H,W), flatten=False, device='cuda').unsqueeze(0)
+            cell = torch.ones_like(coord)
+            cell[:,:,:,0] *= 2/H
+            cell[:,:,:,1] *= 2/W
+
+            pred_latent = model(hr_latent, coord, cell)
+            pred = vae.decode(pred_latent / vae.config.scaling_factor, return_dict=False)[0]
+
+            # denormalize
+            pred = pred / 2 + 0.5
+            hr = hr / 2 + 0.5
+
+            save_dir = os.path.join(config['save_path'], 'valid', valid_data_name, f'X{scale}')
+            os.makedirs(save_dir, exist_ok=True)
+            filename = filename.split('.')[0]  # w/o extension
+            Image.fromarray(tensor2numpy(pred)).save(os.path.join(save_dir, f'{filename}_pred.png'))
+            Image.fromarray(tensor2numpy(hr)).save(os.path.join(save_dir, f'{filename}_hr.png'))
+
+
+def main():
+    # get options
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--config', type=str, required=True)
+    parser.add_argument('--launcher', default='pytorch', help='job launcher')
+    parser.add_argument('--local_rank', type=int, default=0)
+    args = parser.parse_args()
+
+    # distributed setting
+    init_dist(args.launcher)
+    rank, world_size = get_dist_info()
+
+    # load logger
+    save_path = os.path.join('save', args.config.split('/')[-1][:-len('.yaml')])
+    logger = Logger()
+    logger.set_save_path(save_path, remove=False)
+    if rank > 0:
+        builtins.print = lambda *args, **kwargs: None
+        logger.disable()
+    log = logger.log
+
+    # load config
+    config = load_config(args.config)
+    config['world_size'] = world_size
+    if config['seed'] is not None:
+        set_seed(config['seed'])
+    if rank == 0:
+        os.makedirs(save_path, exist_ok=True)
+        with open(os.path.join(save_path, 'config.yaml'), 'w') as f:
+            yaml.dump(config, f, sort_keys=False)
+    log('Config loaded: {}'.format(args.config))
+    config['rank'] = rank
+    config['map_loc'] = f'cuda:{rank}'
+
+    # prepare training
+    model, optimizer, lr_scheduler, iter_start, vae = prepare_training(config, log)
+    model = nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
+    train_loader, train_sampler = make_train_loader(config)
+
+    if rank == 0:
+        assert os.path.exists(config['valid_path'])
+        timer = Timer()
+        train_loss = Averager()
+        t_iter_start = timer.t()
+
+    iter_cur = iter_start
+    iter_max = config['iter_max']
+    iter_print = config['iter_print']
+    iter_val = config['iter_val']
+    iter_save = config['iter_save']
+
+    loss_fn = nn.L1Loss()
+    while True:
+        train_sampler.set_epoch(iter_cur)  # instead of epoch
+        for batch in train_loader:  # process single iteration
+            for key, value in batch.items():
+                batch[key] = value.cuda()
+            model.train()
+            optimizer.zero_grad()
+
+            hr, lr = batch['hr'], batch['lr']
+            assert hr.shape[1] == lr.shape[1] and hr.shape[1] == 4
+            coord, cell = batch['coord'], batch['cell']
+            pred = model(lr, coord, cell)
+            loss = loss_fn(pred, hr)
+
+            loss.backward()
+            optimizer.step()
+            lr_scheduler.step()
+
+            if rank == 0:
+                train_loss.add(loss.item())
+                cond1 = (iter_cur % iter_print == 0)
+                cond2 = (iter_cur % iter_save == 0)
+                cond3 = (iter_cur % iter_val == 0)
+
+                if cond1 or cond2 or cond3:
+                    model_ = model.module if hasattr(model, 'module') else model
+                    if cond1 or cond2:
+                        # save current model state
+                        model_spec = config['model']
+                        model_spec['sd'] = model_.state_dict()
+                        optimizer_spec = config['optimizer']
+                        optimizer_spec['sd'] = optimizer.state_dict()
+                        lr_scheduler_spec = config['lr_scheduler']
+                        lr_scheduler_spec['sd'] = lr_scheduler.state_dict()
+                        sv_file = {
+                            'model': model_spec,
+                            'optimizer': optimizer_spec,
+                            'lr_scheduler': lr_scheduler_spec,
+                            'iter': iter_cur
+                        }
+                    if cond1:
+                        log_info = ['iter {}/{}'.format(iter_cur, iter_max)]
+                        log_info.append('train: loss={:.4f}'.format(train_loss.item()))
+                        log_info.append('lr: {:.4e}'.format(lr_scheduler.get_last_lr()[0]))
+
+                        t = timer.t()
+                        prog = (iter_cur - iter_start + 1) / (iter_max - iter_start + 1)
+                        t_iter = time_text(t - t_iter_start)
+                        t_elapsed, t_all = time_text(t), time_text(t / prog)
+                        log_info.append('{} {}/{}'.format(t_iter, t_elapsed, t_all))
+                        log(', '.join(log_info))
+                        train_loss = Averager()
+                        t_iter_start = timer.t()
+                        torch.save(sv_file, os.path.join(config['save_path'], 'iter_last.pth'))
+                    if cond2:
+                        torch.save(sv_file, os.path.join(config['save_path'], 'iter_{}.pth'.format(iter_cur)))
+                    if cond3:  # validation
+                        valid(model_, config, vae=vae)
+
+            if iter_cur == iter_max:
+                log('Finish training.')
+                return
+            iter_cur += 1
+
+if __name__ == '__main__':
+    main()
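`valid()` above relies on a `make_coord` helper pulled in via `from utils import *`; that utility is not part of this commit. For orientation, here is a minimal sketch of the LIIF-style coordinate grid it is assumed to produce (normalized cell centers in [-1, 1], matching the `(1, H, W, 2)` layout the `cell` tensor is built from):

```python
# Illustrative sketch of the make_coord helper assumed by valid() above;
# the real implementation lives in the repo's utils package, not shown here.
import torch

def make_coord(shape, flatten=True, device='cpu'):
    # Center coordinate of each cell along every axis, normalized to [-1, 1].
    coords = []
    for n in shape:
        r = 1 / (2 * n)
        coords.append(torch.linspace(-1 + r, 1 - r, n, device=device))
    grid = torch.stack(torch.meshgrid(*coords, indexing='ij'), dim=-1)
    return grid.view(-1, grid.shape[-1]) if flatten else grid  # (H*W, 2) or (H, W, 2)
```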