mlbench123 committed (verified)
Commit 561bf8f · Parent(s): 2fbf517

Update torch_utils.py

Files changed (1):
    torch_utils.py  +343 -343

torch_utils.py CHANGED
@@ -1,343 +1,343 @@

The old and new file contents are identical except for one line: the LOGGER import now comes from the local `general` module instead of `utils.general`.

-from utils.general import LOGGER
+from general import LOGGER

The updated file in full:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch utils
"""

import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torchvision

from general import LOGGER

try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Context manager to make all processes in distributed training wait for each local master to do something.
    """
    if local_rank not in [-1, 0]:
        dist.barrier(device_ids=[local_rank])
    yield
    if local_rank == 0:
        dist.barrier(device_ids=[0])
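
A typical use is gating a once-per-node step, such as a dataset download, so only the local master runs it while the other ranks wait at the barrier. A minimal sketch, assuming a DDP launcher that sets LOCAL_RANK and a hypothetical check_dataset() helper:

local_rank = int(os.getenv('LOCAL_RANK', -1))
with torch_distributed_zero_first(local_rank):
    data = check_dataset('coco128.yaml')  # hypothetical: download/verify once per node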


def date_modified(path=__file__):
    # return human-readable file modification date, e.g. '2021-3-26'
    t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f'{t.year}-{t.month}-{t.day}'


def git_describe(path=Path(__file__).parent):  # path must be a directory
    # return human-readable git description, e.g. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
    s = f'git -C {path} describe --tags --long --always'
    try:
        return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
    except subprocess.CalledProcessError:
        return ''  # not a git repository


def select_device(device='', batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
    device = str(device).strip().lower().replace('cuda:', '')  # to string, 'cuda:0' to '0'
    cpu = device == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        devices = device.split(',') if device else '0'  # range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch_size:  # check batch_size is divisible by device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * (len(s) + 1)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n"  # bytes to MB
    else:
        s += 'CPU\n'

    LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
    return torch.device('cuda:0' if cuda else 'cpu')
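
Call sites usually forward a --device CLI value unchanged; a small sketch (MyModel is a hypothetical nn.Module stand-in):

device = select_device('0,1', batch_size=16)  # two GPUs; asserts 16 is divisible by 2
model = MyModel().to(device)                  # MyModel is a hypothetical module
device = select_device('cpu')                 # force CPU even when CUDA is available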


def time_sync():
    # pytorch-accurate time
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


def profile(input, ops, n=10, device=None):
    # YOLOv5 speed/memory/FLOPs profiler
    #
    # Usage:
    #     input = torch.randn(16, 3, 640, 640)
    #     m1 = lambda x: x * torch.sigmoid(x)
    #     m2 = nn.SiLU()
    #     profile(input, [m1, m2], n=100)  # profile over 100 iterations

    results = []
    logging.basicConfig(format="%(message)s", level=logging.INFO)
    device = device or select_device()
    print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
          f"{'input':>24s}{'output':>24s}")

    for x in input if isinstance(input, list) else [input]:
        x = x.to(device)
        x.requires_grad = True
        for m in ops if isinstance(ops, list) else [ops]:
            m = m.to(device) if hasattr(m, 'to') else m  # device
            m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
            try:
                flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
            except Exception:
                flops = 0

            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception as e:  # no backward method
                        # print(e)  # for debug
                        t[2] = float('nan')
                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward
                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)
                s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
                s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
                print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                print(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results


def is_parallel(model):
    # Returns True if model is of type DP or DDP
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def de_parallel(model):
    # De-parallelize a model: returns single-GPU model if model is of type DP or DDP
    return model.module if is_parallel(model) else model


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
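
In checkpoint loading, intersect_dicts() filters a pretrained state_dict down to the keys and shapes the current model can accept, so transfer learning across slightly different architectures does not crash. A hedged sketch (the checkpoint path and the 'anchor' exclusion are illustrative, and model is a hypothetical nn.Module):

ckpt = torch.load('yolov5s.pt', map_location='cpu')            # hypothetical checkpoint
csd = intersect_dicts(ckpt['model'].float().state_dict(),
                      model.state_dict(), exclude=['anchor'])  # keep compatible keys only
model.load_state_dict(csd, strict=False)                       # load the surviving subset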


def initialize_weights(model):
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
    # Finds layer indices matching module class 'mclass'
    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
    # Return global model sparsity
    a, b = 0, 0
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a


def prune(model, amount=0.3):
    # Prune model to requested global sparsity
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    print(' %.3g global sparsity' % sparsity(model))
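
A short sketch of pruning a trained model and confirming the resulting global sparsity (model is assumed to be a trained nn.Module containing Conv2d layers):

prune(model, amount=0.3)                  # zero 30% of each Conv2d's weights (L1 unstructured)
print(f'{sparsity(model):.3f} sparsity')  # fraction of parameters now exactly zero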


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv
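
The fusion is exact in eval mode, since BatchNorm then applies a fixed affine transform that folds into the convolution's weights and bias. A self-contained check (layer sizes are arbitrary):

conv = nn.Conv2d(3, 16, 3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(16).eval()
fused = fuse_conv_and_bn(conv, bn)
x = torch.randn(1, 3, 32, 32)
assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)  # fused layer matches conv+BN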
219
+
220
+
221
+ def model_info(model, verbose=False, img_size=640):
222
+ # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
223
+ n_p = sum(x.numel() for x in model.parameters()) # number parameters
224
+ n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
225
+ if verbose:
226
+ print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
227
+ for i, (name, p) in enumerate(model.named_parameters()):
228
+ name = name.replace('module_list.', '')
229
+ print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
230
+ (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
231
+
232
+ try: # FLOPs
233
+ from thop import profile
234
+ stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
235
+ img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
236
+ flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
237
+ img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
238
+ fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs
239
+ except (ImportError, Exception):
240
+ fs = ''
241
+
242
+ LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
243
+
244
+
245
+ def load_classifier(name='resnet101', n=2):
246
+ # Loads a pretrained model reshaped to n-class output
247
+ model = torchvision.models.__dict__[name](pretrained=True)
248
+
249
+ # ResNet model properties
250
+ # input_size = [3, 224, 224]
251
+ # input_space = 'RGB'
252
+ # input_range = [0, 1]
253
+ # mean = [0.485, 0.456, 0.406]
254
+ # std = [0.229, 0.224, 0.225]
255
+
256
+ # Reshape output to n classes
257
+ filters = model.fc.weight.shape[1]
258
+ model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
259
+ model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
260
+ model.fc.out_features = n
261
+ return model
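
Usage is a one-liner; here with a smaller backbone and the default two classes (name must be a valid torchvision.models entry, and device comes from select_device() above):

classifier = load_classifier(name='resnet50', n=2).to(device)
classifier.eval()  # note: the new fc head is zero-initialized and must be trained before use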


def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    # scales img(bs,3,y,x) by ratio constrained to gs-multiple
    if ratio == 1.0:
        return img
    else:
        h, w = img.shape[2:]
        s = (int(h * ratio), int(w * ratio))  # new size
        img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
        if not same_shape:  # pad/crop img
            h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
        return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
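
For example, halving a batch while keeping both dimensions padded up to the 32-multiple grid:

x = torch.randn(16, 3, 256, 416)  # (bs, ch, h, w)
y = scale_img(x, ratio=0.5)       # resized to (128, 208), then width padded 208 -> 224
print(y.shape)                    # torch.Size([16, 3, 128, 224])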


def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        else:
            setattr(a, k, v)


class EarlyStopping:
    # YOLOv5 simple early stopper
    def __init__(self, patience=30):
        self.best_fitness = 0.0  # i.e. mAP
        self.best_epoch = 0
        self.patience = patience or float('inf')  # epochs to wait after fitness stops improving to stop
        self.possible_stop = False  # possible stop may occur next epoch

    def __call__(self, epoch, fitness):
        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training
            self.best_epoch = epoch
            self.best_fitness = fitness
        delta = epoch - self.best_epoch  # epochs without improvement
        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch
        stop = delta >= self.patience  # stop training if patience exceeded
        if stop:
            LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
                        f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
                        f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
                        f'e.g. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.')
        return stop
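
In a training loop the stopper is called once per epoch with the current fitness (e.g. mAP); a sketch with train_one_epoch() and evaluate() as hypothetical helpers:

stopper = EarlyStopping(patience=30)
for epoch in range(300):
    train_one_epoch(model)       # hypothetical training step
    fitness = evaluate(model)    # hypothetical validation metric, e.g. mAP
    if stopper(epoch, fitness):  # True after `patience` epochs without improvement
        break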


class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive to where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        # Update EMA parameters
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)

            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1 - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes
        copy_attr(self.ema, model, include, exclude)
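
Typical wiring updates the average after every optimizer step and evaluates or checkpoints the smoothed copy; a sketch, assuming model, optimizer, epochs, train_loader and compute_loss already exist (the attribute list is illustrative):

ema = ModelEMA(model)
for epoch in range(epochs):
    for imgs, targets in train_loader:
        loss = compute_loss(model(imgs), targets)  # hypothetical loss function
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ema.update(model)  # fold the new weights into the moving average
    ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride'])
# evaluate and save ema.ema (the smoothed model) rather than the raw weights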