JustDelet committed on
Commit
6e7213c
·
verified ·
1 Parent(s): 14d44fe

Upload legacy.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. legacy.py +327 -0
legacy.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ """Converting legacy network pickle into the new format."""
10
+
11
+ import click
12
+ import pickle
13
+ import re
14
+ import copy
15
+ import numpy as np
16
+ import torch
17
+ import dnnlib
18
+ from torch_utils import misc
19
+
20
+ #----------------------------------------------------------------------------
21
+
22
def load_network_pkl(f, force_fp16=False):
    """Load a network pickle, converting legacy TF pickles on the fly.

    Returns a dict with keys 'G', 'D', 'G_ema', 'training_set_kwargs' and
    'augment_pipe'.  With force_fp16=True, each network is rebuilt with
    FP16 layers enabled (when its kwargs change as a result).
    """
    data = _LegacyUnpickler(f).load()

    # A legacy TensorFlow pickle is a (G, D, Gs) triple of network stubs.
    is_tf_triple = (
        isinstance(data, tuple) and len(data) == 3
        and all(isinstance(item, _TFNetworkStub) for item in data)
    )
    if is_tf_triple:
        tf_G, tf_D, tf_Gs = data
        data = dict(
            G=convert_tf_generator(tf_G),
            D=convert_tf_discriminator(tf_D),
            G_ema=convert_tf_generator(tf_Gs),
        )

    # Older pickles may lack these fields entirely.
    data.setdefault('training_set_kwargs', None)
    data.setdefault('augment_pipe', None)

    # Sanity-check the payload before handing it to callers.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))

    # Optionally rebuild each network with FP16 enabled.
    if force_fp16:
        for key in ('G', 'D', 'G_ema'):
            old = data[key]
            new_kwargs = copy.deepcopy(old.init_kwargs)
            # FP16 settings live under 'synthesis_kwargs' when present,
            # otherwise at the top level of the init kwargs.
            target = new_kwargs.get('synthesis_kwargs', new_kwargs)
            target.num_fp16_res = 4
            target.conv_clamp = 256
            if new_kwargs != old.init_kwargs:  # rebuild only when something changed
                rebuilt = type(old)(**new_kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, rebuilt, require_all=True)
                data[key] = rebuilt
    return data
59
+
60
+ #----------------------------------------------------------------------------
61
+
62
# Lightweight stand-in that receives the state of pickled
# `dnnlib.tflib.network.Network` objects (see _LegacyUnpickler below),
# so legacy TF pickles can be loaded without TensorFlow installed.
class _TFNetworkStub(dnnlib.EasyDict):
    pass
64
+
65
class _LegacyUnpickler(pickle.Unpickler):
    """Unpickler that remaps historical module paths onto the current layout."""

    def find_class(self, module, name):
        # Old TF `Network` objects deserialize into a lightweight stub.
        if (module, name) == ('dnnlib.tflib.network', 'Network'):
            return _TFNetworkStub
        # Renamed modules: translate legacy import paths.
        if module == 'training.networks_baseline':
            module = 'training.networks'
        elif module.startswith('BaselineGAN.'):
            module = 'R3GAN.' + module[len('BaselineGAN.'):]
        return super().find_class(module, name)
74
+
75
+ #----------------------------------------------------------------------------
76
+
77
def _collect_tf_params(tf_net):
    """Flatten a TF network stub into {'component/path/var_name': value}."""
    flat = {}
    def _walk(net, prefix):
        # Record this network's own variables, then descend into components.
        for var_name, var_value in net.variables:
            flat[prefix + var_name] = var_value
        for comp_name, comp in net.components.items():
            _walk(comp, prefix + comp_name + '/')
    _walk(tf_net, '')
    return flat
87
+
88
+ #----------------------------------------------------------------------------
89
+
90
def _populate_module_params(module, *patterns):
    """Copy legacy TF parameter values into a PyTorch module.

    `patterns` is a flat sequence of (regex, value_fn) pairs.  Every name
    yielded by misc.named_params_and_buffers(module) must fully match one of
    the regexes; the paired callable receives the regex groups and returns
    the value to copy.  A value_fn of None marks the name as recognized but
    leaves the tensor untouched (e.g. constant resampling filters).

    Raises on any unmatched name or failed copy, printing the offending
    tensor's name and shape first to aid debugging.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except Exception:  # narrowed from bare `except:`; error is still re-raised
            print(name, list(tensor.shape))
            raise
108
+
109
+ #----------------------------------------------------------------------------
110
+
111
def convert_tf_generator(tf_G):
    """Convert a legacy TF generator stub into `networks_stylegan2.Generator`.

    Maps the stored static_kwargs onto the PyTorch constructor arguments,
    rejects pickles containing unrecognized kwargs, then copies every TF
    variable into a freshly constructed network.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        # Record tf_name as recognized; translate a stored None into `none`.
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none

    # Convert kwargs.
    from training import networks_stylegan2
    network_class = networks_stylegan2.Generator
    kwargs = dnnlib.EasyDict(
        z_dim = kwarg('latent_size', 512),
        c_dim = kwarg('label_size', 0),
        w_dim = kwarg('dlatent_size', 512),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        architecture = kwarg('architecture', 'skip'),
        resample_filter = kwarg('resample_kernel', [1,3,3,1]),
        use_noise = kwarg('use_noise', True),
        activation = kwarg('nonlinearity', 'lrelu'),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 8),
            embed_features = kwarg('label_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.01),
            w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
        ),
    )

    # Check for unknown kwargs (these legacy names are accepted but unused).
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    kwarg('conditioning')
    kwarg('fused_modconv')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            # Per-lod ToRGB variables (apparently from progressive-growing
            # checkpoints) are remapped onto resolution-named blocks.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # BUGFIX: was `kwargs.synthesis.kwargs.architecture = 'orig'`,
            # but `kwargs` never gains a 'synthesis' entry in this flat
            # layout, so that line would fail.  `architecture` is a
            # top-level kwarg here, mirroring convert_tf_discriminator.
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.
    G = network_class(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(G,
        r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter', None,
        r'.*\.act_filter', None,
    )
    return G
209
+
210
+ #----------------------------------------------------------------------------
211
+
212
def convert_tf_discriminator(tf_D):
    """Convert a legacy TF discriminator stub into `networks_stylegan2.Discriminator`.

    Maps the stored static_kwargs onto the PyTorch constructor arguments,
    rejects pickles containing unrecognized kwargs, then copies every TF
    variable into a freshly constructed network.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Look up a legacy kwarg, recording the name as recognized.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)

    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,  # note the *2 rescaling of the TF value
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )

    # Check for unknown kwargs (these legacy names are accepted but unused).
    kwarg('structure')
    kwarg('conditioning')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            # Per-lod FromRGB variables (apparently from progressive-growing
            # checkpoints) are remapped onto resolution-named blocks, and the
            # architecture falls back to 'orig'.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.  Each (regex, lambda) pair maps a PyTorch param/buffer
    # name onto the corresponding TF variable (transposed into PyTorch layout).
    from training import networks_stylegan2
    D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,
    )
    return D
294
+
295
+ #----------------------------------------------------------------------------
296
+
297
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.

    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.

    Example:

    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    # NOTE: the docstring above doubles as the click --help text; keep it user-facing.
    print(f'Loading "{source}"...')
    # dnnlib.util.open_url accepts a URL as well as a local path (see the
    # example above); load_network_pkl performs the actual conversion.
    with dnnlib.util.open_url(source) as f:
        data = load_network_pkl(f, force_fp16=force_fp16)
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as f:
        pickle.dump(data, f)
    print('Done.')
321
+
322
+ #----------------------------------------------------------------------------
323
+
324
# Script entry point: click supplies the arguments from the command line.
if __name__ == "__main__":
    convert_network_pickle() # pylint: disable=no-value-for-parameter
326
+
327
+ #----------------------------------------------------------------------------