import os
from abc import abstractmethod
from pathlib import Path

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

from utils.class_registry import ClassRegistry
from datasets.datasets import ImageDataset
from datasets.transforms import transforms_registry
from utils.common_utils import tensor2im
from runners.base_runner import BaseRunner


inference_runner_registry = ClassRegistry()
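# Runners register themselves here so that a runner class can be looked up by
# name (e.g. "fse_inference_runner"); construction details are defined by BaseRunner.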


@inference_runner_registry.add_to_registry(name="base_inference_runner")
class BaseInferenceRunner(BaseRunner):
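    """Two-phase inference: invert all input images, then apply editing
    directions to the cached inversion results.

    Subclasses implement ``_run_on_batch`` and ``_run_editing_on_batch``.
    """
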
    def run(self):
        self.run_inversion()
        self.run_editing()

    @torch.inference_mode()
    def run_inversion(self):
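        """Invert every image in the inference dataset, save reconstructions to
        ``<output_dir>/inversion``, and cache per-batch latents/features in
        ``self.method_results`` for the editing phase."""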
        output_inv_dir = Path(self.config.exp.output_dir) / "inversion"
        output_inv_dir.mkdir(parents=True, exist_ok=True)

        transform_dict = transforms_registry[self.config.data.transform]().get_transforms()
        dataset = ImageDataset(self.config.data.inference_dir, transform_dict["test"])
        dataloader = DataLoader(
            dataset,
            batch_size=self.config.model.batch_size,
            shuffle=False,
            num_workers=self.config.model.workers,
        )

        self.method_results = []
        self.paths = dataset.paths
        self.method.eval()

        print("Start inversion")
        global_i = 0

        for input_batch in tqdm(dataloader):
            input_cuda = input_batch.to(self.device).float()

            images, result_batch = self._run_on_batch(input_cuda)
            result_batch["img_names"] = []
            
            for tensor in images:
                image = tensor2im(tensor)
                img_name = os.path.basename(dataset.paths[global_i])
                result_batch["img_names"].append(img_name)
                image.save(output_inv_dir / img_name)
                global_i += 1

            self.method_results.append(result_batch)

    @torch.inference_mode()
    def run_editing(self):
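        """Apply every editing direction from ``config.inference.editings_data``
        at each requested degree to the cached inversion results, saving images
        under ``<output_dir>/<editing_name>/edit_power_<degree>``."""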
        editing_data = self.config.inference.editings_data

        for editing_name, editing_degrees in editing_data.items():
            print(f"Sart editing for {editing_name} direction with degrees {editing_degrees}")
            output_edit_dir =  Path(self.config.exp.output_dir) / editing_name
            output_edit_paths = []

            for editing_degree in editing_degrees:
                editing_dir_degree_pth = output_edit_dir / f"edit_power_{editing_degree:.4f}"
                editing_dir_degree_pth.mkdir(parents=True, exist_ok=True)
                output_edit_paths.append(editing_dir_degree_pth)

            for method_res_batch in tqdm(self.method_results):
                edited_imgs_batch = self._run_editing_on_batch(
                    method_res_batch, editing_name, editing_degrees
                )

                for edited_imgs, img_name in zip(edited_imgs_batch, method_res_batch["img_names"]):
                    for edited_img_tensor, save_dir in zip(edited_imgs, output_edit_paths):
                        edited_img = tensor2im(edited_img_tensor)
                        edited_img.save(save_dir / img_name)

    @abstractmethod
    def _run_on_batch(self, inputs):
        raise NotImplementedError()

    @abstractmethod
    def _run_editing_on_batch(self, method_res_batch, editing_name, editing_degrees):
        raise NotImplementedError()


@inference_runner_registry.add_to_registry(name="fse_inference_runner")
class FSEInferenceRunner(BaseInferenceRunner):
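    """Inference runner for the feature-style encoder (FSE) method.

    Inversion keeps the reconstructed W+ latent, the fused/predicted feature
    maps, and an auxiliary e4e latent; editing moves both latents and
    transfers the resulting feature-space delta through the feature editor.
    """
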
    def _run_on_batch(self, inputs):
        images, w_recon, fused_feat, predicted_feat = self.method(inputs, return_latents=True)
        
        x = F.interpolate(inputs, size=(256, 256), mode="bilinear", align_corners=False)
        w_e4e = self.method.e4e_encoder(x)
        w_e4e = w_e4e + self.method.latent_avg

        result_batch = {
            "latents": w_recon, 
            "fused_feat": fused_feat, 
            "predicted_feat": predicted_feat, 
            "w_e4e": w_e4e,
            "inputs": inputs.cpu()
        }
        
        return images, result_batch

    def _run_editing_on_batch(self, method_res_batch, editing_name, editing_degrees, mask=None, return_e4e=False):
        orig_latents = method_res_batch["latents"]
        edited_images = []
        n_iter = 1e5  # large enough that feature_scale below saturates at 1.0

        for i, latent in enumerate(orig_latents):
            edited_latents = self.get_edited_latent(
                latent.unsqueeze(0), 
                editing_name, 
                editing_degrees, 
                method_res_batch["inputs"][i].unsqueeze(0)
            )
            
            w_e4e = method_res_batch["w_e4e"][i].unsqueeze(0)
            edited_w_e4e = self.get_edited_latent(
                w_e4e, 
                editing_name, 
                editing_degrees, 
                method_res_batch["inputs"][i].unsqueeze(0)
            )

            if edited_latents is None or edited_w_e4e is None:
                print(f"WARNING, skip editing {editing_name}")
                continue

            is_stylespace = isinstance(edited_latents, tuple)
            if not is_stylespace:
                edited_latents = torch.cat(edited_latents, dim=0).unsqueeze(0)
                edited_w_e4e = torch.cat(edited_w_e4e, dim=0).unsqueeze(0)

            w_e4e = w_e4e.repeat(len(editing_degrees), 1, 1)  # batch size = len(editing_degrees)

            e4e_inv, fs_x = self.method.decoder(
                [w_e4e],
                input_is_latent=True,
                randomize_noise=False,
                return_latents=False,
                return_features=True,
                early_stop=None if return_e4e else 64
            )

            e4e_edit, fs_y = self.method.decoder(
                edited_w_e4e,
                input_is_latent=True,
                randomize_noise=False,
                return_latents=False,
                is_stylespace=is_stylespace,
                return_features=True,
                early_stop=None if return_e4e else 64
            )

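            # Editing direction in feature space: the difference between the
            # e4e inversion and the e4e edit at generator layer 9 (the 64x64
            # feature block; early_stop=64 above halts generation there).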
            delta = fs_x[9] - fs_y[9]

            if mask is not None:
                delta_mask = mask[i][0].unsqueeze(0).repeat(512, 1, 1).unsqueeze(0)
                delta_mask = F.interpolate(delta_mask, size=(64, 64), mode="bilinear", align_corners=False)
                delta = delta * (1 - delta_mask)

            fused_feat = method_res_batch["fused_feat"][i].to(self.device)
            fused_feat = fused_feat.repeat(len(editing_degrees), 1, 1, 1)

            # The encoder acts as the feature editor: it fuses the original
            # features with the editing delta. The edited map is injected at
            # generator layer 9; all other entries stay None.
            edited_feat = self.method.encoder(torch.cat([fused_feat, delta], dim=1))
            edit_features = [None] * 9 + [edited_feat] + [None] * (17 - 9)

            image_edits, _ = self.method.decoder(
                edited_latents,
                input_is_latent=True,
                new_features=edit_features,
                feature_scale=min(1.0, 0.0001 * n_iter),
                is_stylespace=is_stylespace,
                randomize_noise=False
            )

            edited_images.append(image_edits)

        edited_images = torch.stack(edited_images)

        if return_e4e:
            # Note: e4e_inv and e4e_edit come from the last loop iteration
            # (they are overwritten for every image in the batch).
            return edited_images, e4e_inv, e4e_edit

        return edited_images  # shape: (batch_size, len(editing_degrees), 3, 1024, 1024)


@inference_runner_registry.add_to_registry(name="fse_inverter_inference_runner")
class FSEInverterInferenceRunner(BaseInferenceRunner):
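    """Inference runner for the FSE inverter variant: edits only the W+
    latent and reuses the fused feature maps unchanged at generator layer 9.
    """
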
    def _run_on_batch(self, inputs):
        images, w_recon, fused_feat, predicted_feat = self.method(inputs, return_latents=True)
        result_batch = {
            "latents": w_recon, 
            "fused_feat": fused_feat, 
            "predicted_feat": predicted_feat,
            "inputs": inputs.cpu()
        }
        
        return images, result_batch

    def _run_editing_on_batch(self, method_res_batch, editing_name, editing_degrees):
        orig_latents = method_res_batch["latents"]
        edited_images = []
        n_iter = 1e5  # large enough that feature_scale below saturates at 1.0

        for i, latent in enumerate(orig_latents):
            edited_latents = self.get_edited_latent(
                latent.unsqueeze(0), 
                editing_name, 
                editing_degrees, 
                method_res_batch["inputs"][i].unsqueeze(0)
            )

            if edited_latents is None:
                print(f"WARNING, skip editing {editing_name}")
                continue

            is_stylespace = isinstance(edited_latents, tuple)
            if not is_stylespace:
                edited_latents = torch.cat(edited_latents, dim=0).unsqueeze(0)

            fused_feat = method_res_batch["fused_feat"][i].to(self.device)
            fused_feat = fused_feat.repeat(len(editing_degrees), 1, 1, 1)

            # Reuse the fused feature maps unchanged at generator layer 9.
            edit_features = [None] * 9 + [fused_feat] + [None] * (17 - 9)

            image_edits, _ = self.method.decoder(
                edited_latents,
                input_is_latent=True,
                new_features=edit_features,
                feature_scale=min(1.0, 0.0001 * n_iter),
                is_stylespace=is_stylespace,
                randomize_noise=False
            )

            edited_images.append(image_edits)

        edited_images = torch.stack(edited_images)

        return edited_images  # shape: (batch_size, len(editing_degrees), 3, 1024, 1024)