duongve committed
Commit af18ea0 · verified · 1 Parent(s): bb40c5e

Upload trainer.py

Files changed (1):
  1. QA_result/trainer.py +1208 -0

QA_result/trainer.py ADDED

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Train a model on a dataset.

Usage:
    $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
"""

import gc
import math
import os
import subprocess
import time
import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from pathlib import Path

import numpy as np
import torch
import torch.nn.functional as F
from torch import distributed as dist
from torch import nn, optim

from ultralytics import __version__
from ultralytics.cfg import get_cfg, get_save_dir
from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
from ultralytics.utils import (
    DEFAULT_CFG,
    LOCAL_RANK,
    LOGGER,
    RANK,
    TQDM,
    YAML,
    callbacks,
    clean_url,
    colorstr,
    emojis,
)
from ultralytics.utils.autobatch import check_train_batch_size
from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
from ultralytics.utils.files import get_latest_run
from ultralytics.utils.torch_utils import (
    TORCH_2_4,
    EarlyStopping,
    ModelEMA,
    autocast,
    convert_optimizer_state_dict_to_fp16,
    init_seeds,
    one_cycle,
    select_device,
    strip_optimizer,
    torch_distributed_zero_first,
    unset_deterministic,
)

class CWDLoss(nn.Module):
    """PyTorch version of `Channel-wise Distillation for Semantic Segmentation
    <https://arxiv.org/abs/2011.13256>`_.
    """

    def __init__(self, channels_s, channels_t, tau=1.0):
        super().__init__()
        self.tau = tau

    def forward(self, y_s, y_t):
        """Forward computation.

        Args:
            y_s (list): The student model predictions, each with shape (N, C, H, W).
            y_t (list): The teacher model predictions, each with shape (N, C, H, W).

        Returns:
            torch.Tensor: The calculated loss value summed over all stages.
        """
        assert len(y_s) == len(y_t)
        losses = []

        for idx, (s, t) in enumerate(zip(y_s, y_t)):
            assert s.shape == t.shape
            N, C, H, W = s.shape

            # Normalize each channel's spatial map into a distribution
            softmax_pred_T = F.softmax(t.view(-1, W * H) / self.tau, dim=1)

            logsoftmax = torch.nn.LogSoftmax(dim=1)
            cost = torch.sum(
                softmax_pred_T * logsoftmax(t.view(-1, W * H) / self.tau)
                - softmax_pred_T * logsoftmax(s.view(-1, W * H) / self.tau)
            ) * (self.tau ** 2)

            losses.append(cost / (C * N))
        loss = sum(losses)
        return loss

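# A minimal sanity check for CWDLoss (illustrative shapes, not part of the upstream
# file): identical student/teacher features should give a loss of ~0, since the KL
# divergence of a distribution with itself vanishes.
#
#   s = [torch.randn(2, 64, 20, 20)]                      # student stage output
#   t = [torch.randn(2, 64, 20, 20)]                      # teacher stage output (same shape)
#   cwd = CWDLoss(channels_s=[64], channels_t=[64], tau=1.0)
#   print(cwd(s, t))                                      # scalar tensor, >= 0
#   print(cwd(s, s))                                      # exactly 0 for identical inputs
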
class MGDLoss(nn.Module):
    def __init__(
        self,
        student_channels,
        teacher_channels,
        alpha_mgd=0.00002,
        lambda_mgd=0.65,
    ):
        super(MGDLoss, self).__init__()
        self.alpha_mgd = alpha_mgd
        self.lambda_mgd = lambda_mgd
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.generation = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(s_chan, t_chan, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(t_chan, t_chan, kernel_size=3, padding=1),
            ).to(device) for s_chan, t_chan in zip(student_channels, teacher_channels)
        ])

    def forward(self, y_s, y_t, layer=None):
        """Forward computation.

        Args:
            y_s (list): The student model predictions, each with shape (N, C, H, W).
            y_t (list): The teacher model predictions, each with shape (N, C, H, W).
            layer (str, optional): If "outlayer", use the last generation block for every stage.

        Returns:
            torch.Tensor: The calculated loss value summed over all stages.
        """
        losses = []
        for idx, (s, t) in enumerate(zip(y_s, y_t)):
            if layer == "outlayer":
                idx = -1
            losses.append(self.get_dis_loss(s, t, idx) * self.alpha_mgd)
        loss = sum(losses)
        return loss

    def get_dis_loss(self, preds_S, preds_T, idx):
        loss_mse = nn.MSELoss(reduction='sum')
        N, C, H, W = preds_T.shape

        device = preds_S.device
        mat = torch.rand((N, 1, H, W)).to(device)
        # Drop ~lambda_mgd of the spatial positions in the student feature
        mat = torch.where(mat > 1 - self.lambda_mgd, 0, 1).to(device)

        masked_fea = torch.mul(preds_S, mat)
        new_fea = self.generation[idx](masked_fea)

        dis_loss = loss_mse(new_fea, preds_T) / N
        return dis_loss

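# Hedged note on the masking in get_dis_loss above: on average a fraction
# lambda_mgd (default 0.65) of spatial positions is zeroed in the student feature,
# and the generation block must reconstruct the full teacher feature from the rest.
#
#   mat = torch.rand(2, 1, 20, 20)
#   mask = torch.where(mat > 1 - 0.65, 0, 1)              # ~65% of positions become 0
#   masked = torch.randn(2, 64, 20, 20) * mask            # mask broadcasts over channels
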
class FeatureLoss(nn.Module):
    def __init__(self, channels_s, channels_t, distiller='mgd', loss_weight=1.0):
        super(FeatureLoss, self).__init__()
        self.loss_weight = loss_weight
        self.distiller = distiller

        # Move all modules to the same device
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Convert to ModuleList and ensure consistent dtype
        self.align_module = nn.ModuleList()
        self.norm = nn.ModuleList()
        self.norm1 = nn.ModuleList()

        # Create alignment modules (1x1 conv projects student channels to teacher channels)
        for s_chan, t_chan in zip(channels_s, channels_t):
            align = nn.Sequential(
                nn.Conv2d(s_chan, t_chan, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(t_chan, affine=False),
            ).to(device)
            self.align_module.append(align)

        # Create normalization layers
        for t_chan in channels_t:
            self.norm.append(nn.BatchNorm2d(t_chan, affine=False).to(device))

        for s_chan in channels_s:
            self.norm1.append(nn.BatchNorm2d(s_chan, affine=False).to(device))

        if distiller == 'mgd':
            self.feature_loss = MGDLoss(channels_s, channels_t)
        elif distiller == 'cwd':
            self.feature_loss = CWDLoss(channels_s, channels_t)
        else:
            raise NotImplementedError

    def forward(self, y_s, y_t):
        min_len = min(len(y_s), len(y_t))
        y_s = y_s[:min_len]
        y_t = y_t[:min_len]

        tea_feats = []
        stu_feats = []

        for idx, (s, t) in enumerate(zip(y_s, y_t)):
            s = s.type(next(self.align_module[idx].parameters()).dtype)
            t = t.type(next(self.align_module[idx].parameters()).dtype)

            if self.distiller == "cwd":
                s = self.align_module[idx](s)  # project student to teacher channel count
                stu_feats.append(s)
                tea_feats.append(t.detach())
            else:
                t = self.norm[idx](t)  # normalize teacher features only (MGD)
                stu_feats.append(s)
                tea_feats.append(t.detach())

        loss = self.feature_loss(stu_feats, tea_feats)
        return self.loss_weight * loss

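# Hedged sketch of FeatureLoss with mismatched channel counts (values are
# illustrative; on a CUDA machine the inputs must first be moved to the GPU,
# since the alignment modules are created there when available):
#
#   fl = FeatureLoss(channels_s=[64], channels_t=[128], distiller='cwd')
#   y_s = [torch.randn(4, 64, 20, 20)]
#   y_t = [torch.randn(4, 128, 20, 20)]
#   loss = fl(y_s, y_t)   # student is projected 64 -> 128 by the 1x1 conv for CWD
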
class DistillationLoss:
    def __init__(self, models, modelt, distiller="CWDLoss"):
        self.distiller = distiller
        self.layers = ["6", "8", "13", "16", "19", "22"]  # model layers to distill
        self.models = models  # student
        self.modelt = modelt  # teacher

        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Initial warm-up forward pass so every module has been built
        with torch.no_grad():
            dummy_input = torch.randn(1, 3, 640, 640)
            _ = self.models(dummy_input.to(device))
            _ = self.modelt(dummy_input.to(device))

        self.channels_s = []
        self.channels_t = []
        self.teacher_module_pairs = []
        self.student_module_pairs = []
        self.remove_handle = []

        self._find_layers()

        self.distill_loss_fn = FeatureLoss(
            channels_s=self.channels_s,
            channels_t=self.channels_t,
            distiller=distiller[:3].lower(),  # normalize e.g. "CWDLoss" -> "cwd"
        )

    def _find_layers(self):
        self.channels_s = []
        self.channels_t = []
        self.teacher_module_pairs = []
        self.student_module_pairs = []

        # Collect the cv2 conv blocks of the selected layers in the teacher
        for name, ml in self.modelt.named_modules():
            if name is not None:
                name = name.split(".")
                if name[0] != "model":
                    continue
                if len(name) >= 3:
                    if name[1] in self.layers:
                        if "cv2" in name[2]:
                            if hasattr(ml, 'conv'):
                                self.channels_t.append(ml.conv.out_channels)
                                self.teacher_module_pairs.append(ml)

        # Collect the matching modules in the student
        for name, ml in self.models.named_modules():
            if name is not None:
                name = name.split(".")
                if name[0] != "model":
                    continue
                if len(name) >= 3:
                    if name[1] in self.layers:
                        if "cv2" in name[2]:
                            if hasattr(ml, 'conv'):
                                self.channels_s.append(ml.conv.out_channels)
                                self.student_module_pairs.append(ml)

        # Keep only the last nl pairs so student and teacher lists line up
        nl = min(len(self.channels_s), len(self.channels_t))
        self.channels_s = self.channels_s[-nl:]
        self.channels_t = self.channels_t[-nl:]
        self.teacher_module_pairs = self.teacher_module_pairs[-nl:]
        self.student_module_pairs = self.student_module_pairs[-nl:]

    def register_hook(self):
        # Remove existing hooks if they exist
        self.remove_handle_()

        self.teacher_outputs = []
        self.student_outputs = []

        def make_student_hook(l):
            def forward_hook(m, input, output):
                if isinstance(output, torch.Tensor):
                    out = output.clone()  # clone to ensure we don't modify the original
                    l.append(out)
                else:
                    l.append([o.clone() if isinstance(o, torch.Tensor) else o for o in output])
            return forward_hook

        def make_teacher_hook(l):
            def forward_hook(m, input, output):
                if isinstance(output, torch.Tensor):
                    l.append(output.detach().clone())  # detach and clone teacher outputs
                else:
                    l.append([o.detach().clone() if isinstance(o, torch.Tensor) else o for o in output])
            return forward_hook

        for ml, ori in zip(self.teacher_module_pairs, self.student_module_pairs):
            self.remove_handle.append(ml.register_forward_hook(make_teacher_hook(self.teacher_outputs)))
            self.remove_handle.append(ori.register_forward_hook(make_student_hook(self.student_outputs)))

    def get_loss(self):
        if not self.teacher_outputs or not self.student_outputs:
            return torch.tensor(0.0, requires_grad=True)

        if len(self.teacher_outputs) != len(self.student_outputs):
            LOGGER.warning(
                f"Mismatched outputs - Teacher: {len(self.teacher_outputs)}, Student: {len(self.student_outputs)}"
            )
            return torch.tensor(0.0, requires_grad=True)

        quant_loss = self.distill_loss_fn(y_s=self.student_outputs, y_t=self.teacher_outputs)

        if self.distiller[:3].lower() != 'cwd':
            quant_loss *= 0.3

        self.teacher_outputs.clear()
        self.student_outputs.clear()

        return quant_loss

    def remove_handle_(self):
        for rm in self.remove_handle:
            rm.remove()
        self.remove_handle.clear()

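# Hedged sketch of the per-epoch flow that _do_train below implements:
#
#   dl = DistillationLoss(student_model, teacher_model, distiller="cwd")
#   dl.register_hook()                        # capture cv2 outputs of layers 6, 8, 13, 16, 19, 22
#   loss, loss_items = student_model(batch)   # student forward fills dl.student_outputs
#   with torch.no_grad():
#       _ = teacher_model(batch['img'])       # teacher forward fills dl.teacher_outputs
#   loss = loss.sum() + dl.get_loss()         # feature loss; output buffers are cleared inside
#   dl.remove_handle_()                       # detach hooks at the end of the epoch
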
class BaseTrainer:
    """
    A base class for creating trainers.

    This class provides the foundation for training YOLO models, handling the training loop, validation, checkpointing,
    and various training utilities. It supports both single-GPU and multi-GPU distributed training.

    Attributes:
        args (SimpleNamespace): Configuration for the trainer.
        validator (BaseValidator): Validator instance.
        model (nn.Module): Model instance.
        callbacks (defaultdict): Dictionary of callbacks.
        save_dir (Path): Directory to save results.
        wdir (Path): Directory to save weights.
        last (Path): Path to the last checkpoint.
        best (Path): Path to the best checkpoint.
        save_period (int): Save checkpoint every x epochs (disabled if < 1).
        batch_size (int): Batch size for training.
        epochs (int): Number of epochs to train for.
        start_epoch (int): Starting epoch for training.
        device (torch.device): Device to use for training.
        amp (bool): Flag to enable AMP (Automatic Mixed Precision).
        scaler (amp.GradScaler): Gradient scaler for AMP.
        data (str): Path to data.
        ema (nn.Module): EMA (Exponential Moving Average) of the model.
        resume (bool): Resume training from a checkpoint.
        lf (nn.Module): Loss function.
        scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler.
        best_fitness (float): The best fitness value achieved.
        fitness (float): Current fitness value.
        loss (float): Current loss value.
        tloss (float): Total loss value.
        loss_names (list): List of loss names.
        csv (Path): Path to results CSV file.
        metrics (dict): Dictionary of metrics.
        plots (dict): Dictionary of plots.

    Methods:
        train: Execute the training process.
        validate: Run validation on the test set.
        save_model: Save model training checkpoints.
        get_dataset: Get train and validation datasets.
        setup_model: Load, create, or download model.
        build_optimizer: Construct an optimizer for the model.

    Examples:
        Initialize a trainer and start training
        >>> trainer = BaseTrainer(cfg="config.yaml")
        >>> trainer.train()
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """
        Initialize the BaseTrainer class.

        Args:
            cfg (str, optional): Path to a configuration file.
            overrides (dict, optional): Configuration overrides.
            _callbacks (list, optional): List of callback functions.
        """
        self.args = get_cfg(cfg, overrides)
        self.check_resume(overrides)
        self.device = select_device(self.args.device, self.args.batch)
        # Update "-1" devices so post-training val does not repeat search
        self.args.device = os.getenv("CUDA_VISIBLE_DEVICES") if "cuda" in str(self.device) else str(self.device)
        self.validator = None
        self.metrics = None
        self.plots = {}

        # Distillation options are consumed here and removed from overrides
        if overrides:
            self.teacher = overrides.pop("teacher", None)
            self.loss_type = overrides.pop("distillation_loss", None)
        else:
            self.loss_type = None
            self.teacher = None

        init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)

        # Dirs
        self.save_dir = get_save_dir(self.args)
        self.args.name = self.save_dir.name  # update name for loggers
        self.wdir = self.save_dir / "weights"  # weights dir
        if RANK in {-1, 0}:
            self.wdir.mkdir(parents=True, exist_ok=True)  # make dir
            self.args.save_dir = str(self.save_dir)
            YAML.save(self.save_dir / "args.yaml", vars(self.args))  # save run args
        self.last, self.best = self.wdir / "last.pt", self.wdir / "best.pt"  # checkpoint paths
        self.save_period = self.args.save_period

        self.batch_size = self.args.batch
        self.epochs = self.args.epochs or 100  # in case users accidentally pass epochs=None with timed training
        self.start_epoch = 0
        if RANK == -1:
            print_args(vars(self.args))

        # Device
        if self.device.type in {"cpu", "mps"}:
            self.args.workers = 0  # faster CPU training as time dominated by inference, not dataloading

        # Model and Dataset
        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolo11n -> yolo11n.pt
        with torch_distributed_zero_first(LOCAL_RANK):  # avoid auto-downloading dataset multiple times
            self.data = self.get_dataset()

        self.ema = None

        # Optimization utils init
        self.lf = None
        self.scheduler = None

        # Epoch level metrics
        self.best_fitness = None
        self.fitness = None
        self.loss = None
        self.tloss = None
        self.loss_names = ["Loss"]
        self.csv = self.save_dir / "results.csv"
        self.plot_idx = [0, 1, 2]

        # HUB
        self.hub_session = None

        # Callbacks
        self.callbacks = _callbacks or callbacks.get_default_callbacks()
        if RANK in {-1, 0}:
            callbacks.add_integration_callbacks(self)

    def add_callback(self, event: str, callback):
        """Append the given callback to the event's callback list."""
        self.callbacks[event].append(callback)

    def set_callback(self, event: str, callback):
        """Override the existing callbacks with the given callback for the specified event."""
        self.callbacks[event] = [callback]

    def run_callbacks(self, event: str):
        """Run all existing callbacks associated with a particular event."""
        for callback in self.callbacks.get(event, []):
            callback(self)

    def train(self):
        """Allow device='', device=None on Multi-GPU systems to default to device=0."""
        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
            world_size = len(self.args.device.split(","))
        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
            world_size = len(self.args.device)
        elif self.args.device in {"cpu", "mps"}:  # i.e. device='cpu' or 'mps'
            world_size = 0
        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
            world_size = 1  # default to device 0
        else:  # i.e. device=None or device=''
            world_size = 0

        # Run subprocess if DDP training, else train normally
        if world_size > 1 and "LOCAL_RANK" not in os.environ:
            # Argument checks
            if self.args.rect:
                LOGGER.warning("'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
                self.args.rect = False
            if self.args.batch < 1.0:
                LOGGER.warning(
                    "'batch<1' for AutoBatch is incompatible with Multi-GPU training, setting default 'batch=16'"
                )
                self.args.batch = 16

            # Command
            cmd, file = generate_ddp_command(world_size, self)
            try:
                LOGGER.info(f"{colorstr('DDP:')} debug command {' '.join(cmd)}")
                subprocess.run(cmd, check=True)
            except Exception as e:
                raise e
            finally:
                ddp_cleanup(self, str(file))

        else:
            self._do_train(world_size)

    def _setup_scheduler(self):
        """Initialize training learning rate scheduler."""
        if self.args.cos_lr:
            self.lf = one_cycle(1, self.args.lrf, self.epochs)  # cosine 1->hyp['lrf']
        else:
            self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf  # linear
        self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)

    def _setup_ddp(self, world_size):
        """Initialize and set the DistributedDataParallel parameters for training."""
        torch.cuda.set_device(RANK)
        self.device = torch.device("cuda", RANK)
        os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"  # set to enforce timeout
        dist.init_process_group(
            backend="nccl" if dist.is_nccl_available() else "gloo",
            timeout=timedelta(seconds=10800),  # 3 hours
            rank=RANK,
            world_size=world_size,
        )

    def _setup_train(self, world_size):
        """Build dataloaders and optimizer on correct rank process."""
        # Model
        self.run_callbacks("on_pretrain_routine_start")
        ckpt = self.setup_model()
        self.model = self.model.to(self.device)

        # Load teacher model to device
        if self.teacher is not None:
            for k, v in self.teacher.named_parameters():
                v.requires_grad = True
            self.teacher = self.teacher.to(self.device)

        self.set_model_attributes()

        # Freeze layers
        freeze_list = (
            self.args.freeze
            if isinstance(self.args.freeze, list)
            else range(self.args.freeze)
            if isinstance(self.args.freeze, int)
            else []
        )
        always_freeze_names = [".dfl"]  # always freeze these layers
        freeze_layer_names = [f"model.{x}." for x in freeze_list] + always_freeze_names
        self.freeze_layer_names = freeze_layer_names
        for k, v in self.model.named_parameters():
            # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
            if any(x in k for x in freeze_layer_names):
                LOGGER.info(f"Freezing layer '{k}'")
                v.requires_grad = False
            elif not v.requires_grad and v.dtype.is_floating_point:  # only floating point Tensor can require gradients
                LOGGER.warning(
                    f"setting 'requires_grad=True' for frozen layer '{k}'. "
                    "See ultralytics.engine.trainer for customization of frozen layers."
                )
                v.requires_grad = True

        # Check AMP
        self.amp = torch.tensor(self.args.amp).to(self.device)  # True or False
        if self.amp and RANK in {-1, 0}:  # Single-GPU and DDP
            callbacks_backup = callbacks.default_callbacks.copy()  # backup callbacks as check_amp() resets them
            self.amp = torch.tensor(check_amp(self.model), device=self.device)
            callbacks.default_callbacks = callbacks_backup  # restore callbacks
        if RANK > -1 and world_size > 1:  # DDP
            dist.broadcast(self.amp.int(), src=0)  # broadcast from rank 0 to all other ranks; gloo errors with boolean
        self.amp = bool(self.amp)  # as boolean
        self.scaler = (
            torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
        )
        if world_size > 1:
            self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
            if self.teacher is not None:
                self.teacher = nn.parallel.DistributedDataParallel(self.teacher, device_ids=[RANK])
                self.teacher.eval()

        # Check imgsz
        gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32)  # grid size (max stride)
        self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
        self.stride = gs  # for multiscale training

        # Batch size
        if self.batch_size < 1 and RANK == -1:  # single-GPU only, estimate best batch size
            self.args.batch = self.batch_size = self.auto_batch()

        # Dataloaders
        batch_size = self.batch_size // max(world_size, 1)
        self.train_loader = self.get_dataloader(
            self.data["train"], batch_size=batch_size, rank=LOCAL_RANK, mode="train"
        )
        if RANK in {-1, 0}:
            # Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
            self.test_loader = self.get_dataloader(
                self.data.get("val") or self.data.get("test"),
                batch_size=batch_size if self.args.task == "obb" else batch_size * 2,
                rank=-1,
                mode="val",
            )
            self.validator = self.get_validator()
            metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix="val")
            self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
            self.ema = ModelEMA(self.model)
            if self.args.plots:
                self.plot_training_labels()

        # Optimizer
        self.accumulate = max(round(self.args.nbs / self.batch_size), 1)  # accumulate loss before optimizing
        weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
        iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
        self.optimizer = self.build_optimizer(
            model=self.model,
            teacher=self.teacher,
            name=self.args.optimizer,
            lr=self.args.lr0,
            momentum=self.args.momentum,
            decay=weight_decay,
            iterations=iterations,
        )
        # Scheduler
        self._setup_scheduler()
        self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
        self.resume_training(ckpt)
        self.scheduler.last_epoch = self.start_epoch - 1  # do not move
        self.run_callbacks("on_pretrain_routine_end")

    def _do_train(self, world_size=1):
        """Train the model with the specified world size."""
        if world_size > 1:
            self._setup_ddp(world_size)
        self._setup_train(world_size)

        nb = len(self.train_loader)  # number of batches
        nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1  # warmup iterations
        last_opt_step = -1
        self.epoch_time = None
        self.epoch_time_start = time.time()
        self.train_time_start = time.time()
        self.run_callbacks("on_train_start")
        LOGGER.info(
            f"Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n"
            f"Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n"
            f"Logging results to {colorstr('bold', self.save_dir)}\n"
            f"Starting training for " + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...")
        )
        if self.args.close_mosaic:
            base_idx = (self.epochs - self.args.close_mosaic) * nb
            self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2])

        # Build the distillation loss once per run
        if self.teacher is not None:
            distillation_loss = DistillationLoss(self.model, self.teacher, distiller=self.loss_type)

        epoch = self.start_epoch
        self.optimizer.zero_grad()  # zero any resumed gradients to ensure stability on train start
        while True:
            self.epoch = epoch
            self.run_callbacks("on_train_epoch_start")
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")  # suppress 'Detected lr_scheduler.step() before optimizer.step()'
                self.scheduler.step()

            self._model_train()
            if RANK != -1:
                self.train_loader.sampler.set_epoch(epoch)
            pbar = enumerate(self.train_loader)
            # Update dataloader attributes (optional)
            if epoch == (self.epochs - self.args.close_mosaic):
                self._close_dataloader_mosaic()
                self.train_loader.reset()

            if RANK in {-1, 0}:
                LOGGER.info(self.progress_string())
                pbar = TQDM(enumerate(self.train_loader), total=nb)
            self.tloss = None
            if self.teacher is not None:
                distillation_loss.register_hook()  # re-register hooks at the start of each epoch
            for i, batch in pbar:
                self.run_callbacks("on_train_batch_start")
                # Warmup
                ni = i + nb * epoch
                if ni <= nw:
                    xi = [0, nw]  # x interp
                    self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
                    for j, x in enumerate(self.optimizer.param_groups):
                        # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x["lr"] = np.interp(
                            ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x["initial_lr"] * self.lf(epoch)]
                        )
                        if "momentum" in x:
                            x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])

                # Forward
                with autocast(self.amp):
                    batch = self.preprocess_batch(batch)
                    loss, self.loss_items = self.model(batch)
                    self.loss = loss.sum()
                    if RANK != -1:
                        self.loss *= world_size
                    self.tloss = (
                        (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items
                    )

                    # Distillation: teacher forward fires the hooks, then the feature loss is added
                    if self.teacher is not None:
                        # Half-cosine ramp of the distillation weight from 1.0 down to 0.1 across the epoch
                        distill_weight = ((1 - math.cos(i * math.pi / len(self.train_loader))) / 2) * (0.1 - 1) + 1
                        with torch.no_grad():
                            _ = self.teacher(batch['img'])  # teacher forward only to populate the hooks

                        self.d_loss = distillation_loss.get_loss()
                        self.d_loss *= distill_weight
                        self.loss += self.d_loss
                # Backward
                self.scaler.scale(self.loss).backward()

                # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
                if ni - last_opt_step >= self.accumulate:
                    self.optimizer_step()
                    last_opt_step = ni

                    # Timed stopping
                    if self.args.time:
                        self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
                        if RANK != -1:  # if DDP training
                            broadcast_list = [self.stop if RANK == 0 else None]
                            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
                            self.stop = broadcast_list[0]
                        if self.stop:  # training time exceeded
                            break

                # Log
                if RANK in {-1, 0}:
                    loss_length = self.tloss.shape[0] if len(self.tloss.shape) else 1
                    pbar.set_description(
                        ("%11s" * 2 + "%11.4g" * (2 + loss_length))
                        % (
                            f"{epoch + 1}/{self.epochs}",
                            f"{self._get_memory():.3g}G",  # (GB) GPU memory util
                            *(self.tloss if loss_length > 1 else torch.unsqueeze(self.tloss, 0)),  # losses
                            batch["cls"].shape[0],  # batch size, i.e. 8
                            batch["img"].shape[-1],  # imgsz, i.e. 640
                        )
                    )
                    self.run_callbacks("on_batch_end")
                    if self.args.plots and ni in self.plot_idx:
                        self.plot_training_samples(batch, ni)

                self.run_callbacks("on_train_batch_end")

            # Detach distillation hooks at the end of each epoch
            if self.teacher is not None:
                distillation_loss.remove_handle_()

            self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
            self.run_callbacks("on_train_epoch_end")
            if RANK in {-1, 0}:
                final_epoch = epoch + 1 >= self.epochs
                self.ema.update_attr(self.model, include=["yaml", "nc", "args", "names", "stride", "class_weights"])

                # Validation
                if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
                    self._clear_memory(threshold=0.5)  # prevent VRAM spike
                    self.metrics, self.fitness = self.validate()
                self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
                if self.args.time:
                    self.stop |= (time.time() - self.train_time_start) > (self.args.time * 3600)

                # Save model
                if self.args.save or final_epoch:
                    self.save_model()
                    self.run_callbacks("on_model_save")

            # Scheduler
            t = time.time()
            self.epoch_time = t - self.epoch_time_start
            self.epoch_time_start = t
            if self.args.time:
                mean_epoch_time = (t - self.train_time_start) / (epoch - self.start_epoch + 1)
                self.epochs = self.args.epochs = math.ceil(self.args.time * 3600 / mean_epoch_time)
                self._setup_scheduler()
                self.scheduler.last_epoch = self.epoch  # do not move
                self.stop |= epoch >= self.epochs  # stop if exceeded epochs
            self.run_callbacks("on_fit_epoch_end")
            self._clear_memory(0.5)  # clear if memory utilization > 50%

            # Early Stopping
            if RANK != -1:  # if DDP training
                broadcast_list = [self.stop if RANK == 0 else None]
                dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
                self.stop = broadcast_list[0]
            if self.stop:
                break  # must break all DDP ranks
            epoch += 1

        if RANK in {-1, 0}:
            # Do final val with best.pt
            seconds = time.time() - self.train_time_start
            LOGGER.info(f"\n{epoch - self.start_epoch + 1} epochs completed in {seconds / 3600:.3f} hours.")
            self.final_eval()
            if self.args.plots:
                self.plot_metrics()
            self.run_callbacks("on_train_end")
        self._clear_memory()

        # Make sure distillation hooks are removed before teardown
        if self.teacher is not None:
            distillation_loss.remove_handle_()
        unset_deterministic()

        self.run_callbacks("teardown")

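    # Hedged note on the distill_weight schedule in _do_train: with nb batches per
    # epoch, w(i) = ((1 - cos(i*pi/nb)) / 2) * (0.1 - 1) + 1 ramps the feature-loss
    # weight from 1.0 at the first batch down to 0.1 at the last, e.g. for nb=100:
    #
    #   [round(((1 - math.cos(i * math.pi / 100)) / 2) * (0.1 - 1) + 1, 3) for i in (0, 50, 100)]
    #   # -> [1.0, 0.55, 0.1]
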
    def auto_batch(self, max_num_obj=0):
        """Calculate optimal batch size based on model and device memory constraints."""
        return check_train_batch_size(
            model=self.model,
            imgsz=self.args.imgsz,
            amp=self.amp,
            batch=self.batch_size,
            max_num_obj=max_num_obj,
        )  # returns batch size

    def _get_memory(self, fraction=False):
        """Get accelerator memory utilization in GB or as a fraction of total memory."""
        memory, total = 0, 0
        if self.device.type == "mps":
            memory = torch.mps.driver_allocated_memory()
            if fraction:
                return __import__("psutil").virtual_memory().percent / 100
        elif self.device.type != "cpu":
            memory = torch.cuda.memory_reserved()
            if fraction:
                total = torch.cuda.get_device_properties(self.device).total_memory
        return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)

    def _clear_memory(self, threshold: float = None):
        """Clear accelerator memory by calling garbage collector and emptying cache."""
        if threshold:
            assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
            if self._get_memory(fraction=True) <= threshold:
                return
        gc.collect()
        if self.device.type == "mps":
            torch.mps.empty_cache()
        elif self.device.type == "cpu":
            return
        else:
            torch.cuda.empty_cache()

    def read_results_csv(self):
        """Read results.csv into a dictionary using pandas."""
        import pandas as pd  # scope for faster 'import ultralytics'

        return pd.read_csv(self.csv).to_dict(orient="list")

    def _model_train(self):
        """Set model in training mode."""
        self.model.train()
        # Freeze BN stats
        for n, m in self.model.named_modules():
            if any(filter(lambda f: f in n, self.freeze_layer_names)) and isinstance(m, nn.BatchNorm2d):
                m.eval()

    def save_model(self):
        """Save model training checkpoints with additional metadata."""
        import io

        # Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
        buffer = io.BytesIO()
        torch.save(
            {
                "epoch": self.epoch,
                "best_fitness": self.best_fitness,
                "model": None,  # resume and final checkpoints derive from EMA
                "ema": deepcopy(self.ema.ema).half(),
                "updates": self.ema.updates,
                "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
                "train_args": vars(self.args),  # save as dict
                "train_metrics": {**self.metrics, **{"fitness": self.fitness}},
                "train_results": self.read_results_csv(),
                "date": datetime.now().isoformat(),
                "version": __version__,
                "license": "AGPL-3.0 (https://ultralytics.com/license)",
                "docs": "https://docs.ultralytics.com",
            },
            buffer,
        )
        serialized_ckpt = buffer.getvalue()  # get the serialized content to save

        # Save checkpoints
        self.last.write_bytes(serialized_ckpt)  # save last.pt
        if self.best_fitness == self.fitness:
            self.best.write_bytes(serialized_ckpt)  # save best.pt
        if (self.save_period > 0) and (self.epoch % self.save_period == 0):
            (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt)  # save epoch, i.e. 'epoch3.pt'
        # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
        #     (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt)  # save mosaic checkpoint

    def get_dataset(self):
        """
        Get train and validation datasets from data dictionary.

        Returns:
            (dict): A dictionary containing the training/validation/test dataset and category names.
        """
        try:
            if self.args.task == "classify":
                data = check_cls_dataset(self.args.data)
            elif self.args.data.rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
                "detect",
                "segment",
                "pose",
                "obb",
            }:
                data = check_det_dataset(self.args.data)
                if "yaml_file" in data:
                    self.args.data = data["yaml_file"]  # for validating 'yolo train data=url.zip' usage
        except Exception as e:
            raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e
        if self.args.single_cls:
            LOGGER.info("Overriding class names with single class.")
            data["names"] = {0: "item"}
            data["nc"] = 1
        return data

    def setup_model(self):
        """
        Load, create, or download model for any task.

        Returns:
            (dict): Optional checkpoint to resume training from.
        """
        if isinstance(self.model, torch.nn.Module):  # if model is loaded beforehand, no setup needed
            return

        cfg, weights = self.model, None
        ckpt = None
        if str(self.model).endswith(".pt"):
            weights, ckpt = attempt_load_one_weight(self.model)
            cfg = weights.yaml
        elif isinstance(self.args.pretrained, (str, Path)):
            weights, _ = attempt_load_one_weight(self.args.pretrained)
        self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1)  # calls Model(cfg, weights)
        return ckpt

    def optimizer_step(self):
        """Perform a single step of the training optimizer with gradient clipping and EMA update."""
        self.scaler.unscale_(self.optimizer)  # unscale gradients
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)  # clip gradients
        self.scaler.step(self.optimizer)
        self.scaler.update()
        self.optimizer.zero_grad()
        if self.ema:
            self.ema.update(self.model)

    def preprocess_batch(self, batch):
        """Allow custom preprocessing model inputs and ground truths depending on task type."""
        return batch

    def validate(self):
        """
        Run validation on test set using self.validator.

        Returns:
            metrics (dict): Dictionary of validation metrics.
            fitness (float): Fitness score for the validation.
        """
        metrics = self.validator(self)
        fitness = metrics.pop("fitness", -self.loss.detach().cpu().numpy())  # use loss as fitness measure if not found
        if not self.best_fitness or self.best_fitness < fitness:
            self.best_fitness = fitness
        return metrics, fitness

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Get model and raise NotImplementedError for loading cfg files."""
        raise NotImplementedError("This task trainer doesn't support loading cfg files")

    def get_validator(self):
        """Return a NotImplementedError when the get_validator function is called."""
        raise NotImplementedError("get_validator function not implemented in trainer")

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        """Return dataloader derived from torch.data.Dataloader."""
        raise NotImplementedError("get_dataloader function not implemented in trainer")

    def build_dataset(self, img_path, mode="train", batch=None):
        """Build dataset."""
        raise NotImplementedError("build_dataset function not implemented in trainer")

    def label_loss_items(self, loss_items=None, prefix="train"):
        """
        Return a loss dict with labelled training loss items tensor.

        Note:
            This is not needed for classification but necessary for segmentation & detection
        """
        return {"loss": loss_items} if loss_items is not None else ["loss"]

    def set_model_attributes(self):
        """Set or update model parameters before training."""
        self.model.names = self.data["names"]

    def build_targets(self, preds, targets):
        """Build target tensors for training YOLO model."""
        pass

    def progress_string(self):
        """Return a string describing training progress."""
        return ""

    # TODO: may need to put these following functions into callback
    def plot_training_samples(self, batch, ni):
        """Plot training samples during YOLO training."""
        pass

    def plot_training_labels(self):
        """Plot training labels for YOLO model."""
        pass

    def save_metrics(self, metrics):
        """Save training metrics to a CSV file."""
        keys, vals = list(metrics.keys()), list(metrics.values())
        n = len(metrics) + 2  # number of cols
        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n")  # header
        t = time.time() - self.train_time_start
        with open(self.csv, "a", encoding="utf-8") as f:
            f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")

    def plot_metrics(self):
        """Plot and display metrics visually."""
        pass

    def on_plot(self, name, data=None):
        """Register plots (e.g. to be consumed in callbacks)."""
        path = Path(name)
        self.plots[path] = {"data": data, "timestamp": time.time()}

    def final_eval(self):
        """Perform final evaluation and validation for object detection YOLO model."""
        ckpt = {}
        for f in self.last, self.best:
            if f.exists():
                if f is self.last:
                    ckpt = strip_optimizer(f)
                elif f is self.best:
                    k = "train_results"  # update best.pt train_metrics from last.pt
                    strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
                LOGGER.info(f"\nValidating {f}...")
                self.validator.args.plots = self.args.plots
                self.metrics = self.validator(model=f)
                self.metrics.pop("fitness", None)
                self.run_callbacks("on_fit_epoch_end")

    def check_resume(self, overrides):
        """Check if resume checkpoint exists and update arguments accordingly."""
        resume = self.args.resume
        if resume:
            try:
                exists = isinstance(resume, (str, Path)) and Path(resume).exists()
                last = Path(check_file(resume) if exists else get_latest_run())

                # Check that resume data YAML exists, otherwise strip to force re-download of dataset
                ckpt_args = attempt_load_weights(last).args
                if not isinstance(ckpt_args["data"], dict) and not Path(ckpt_args["data"]).exists():
                    ckpt_args["data"] = self.args.data

                resume = True
                self.args = get_cfg(ckpt_args)
                self.args.model = self.args.resume = str(last)  # reinstate model
                for k in (
                    "imgsz",
                    "batch",
                    "device",
                    "close_mosaic",
                ):  # allow arg updates to reduce memory or update device on resume
                    if k in overrides:
                        setattr(self.args, k, overrides[k])

            except Exception as e:
                raise FileNotFoundError(
                    "Resume checkpoint not found. Please pass a valid checkpoint to resume from, "
                    "i.e. 'yolo train resume model=path/to/last.pt'"
                ) from e
        self.resume = resume

    def resume_training(self, ckpt):
        """Resume YOLO training from given epoch and best fitness."""
        if ckpt is None or not self.resume:
            return
        best_fitness = 0.0
        start_epoch = ckpt.get("epoch", -1) + 1
        if ckpt.get("optimizer", None) is not None:
            self.optimizer.load_state_dict(ckpt["optimizer"])  # optimizer
            best_fitness = ckpt["best_fitness"]
        if self.ema and ckpt.get("ema"):
            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())  # EMA
            self.ema.updates = ckpt["updates"]
        assert start_epoch > 0, (
            f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
            f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
        )
        LOGGER.info(f"Resuming training {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs")
        if self.epochs < start_epoch:
            LOGGER.info(
                f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
            )
            self.epochs += ckpt["epoch"]  # finetune additional epochs
        self.best_fitness = best_fitness
        self.start_epoch = start_epoch
        if start_epoch > (self.epochs - self.args.close_mosaic):
            self._close_dataloader_mosaic()

    def _close_dataloader_mosaic(self):
        """Update dataloaders to stop using mosaic augmentation."""
        if hasattr(self.train_loader.dataset, "mosaic"):
            self.train_loader.dataset.mosaic = False
        if hasattr(self.train_loader.dataset, "close_mosaic"):
            LOGGER.info("Closing dataloader mosaic")
            self.train_loader.dataset.close_mosaic(hyp=copy(self.args))

    def build_optimizer(self, model, teacher=None, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5):
        """
        Construct an optimizer for the given model.

        Args:
            model (torch.nn.Module): The model for which to build an optimizer.
            teacher (torch.nn.Module, optional): Teacher model whose parameters are added to the same
                parameter groups when distillation is enabled.
            name (str, optional): The name of the optimizer to use. If 'auto', the optimizer is selected
                based on the number of iterations.
            lr (float, optional): The learning rate for the optimizer.
            momentum (float, optional): The momentum factor for the optimizer.
            decay (float, optional): The weight decay for the optimizer.
            iterations (float, optional): The number of iterations, which determines the optimizer if
                name is 'auto'.

        Returns:
            (torch.optim.Optimizer): The constructed optimizer.
        """
        g = [], [], []  # optimizer parameter groups
        bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
        if name == "auto":
            LOGGER.info(
                f"{colorstr('optimizer:')} 'optimizer=auto' found, "
                f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
                f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
            )
            nc = self.data.get("nc", 10)  # number of classes
            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
            name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam

        for module_name, module in model.named_modules():
            for param_name, param in module.named_parameters(recurse=False):
                fullname = f"{module_name}.{param_name}" if module_name else param_name
                if "bias" in fullname:  # bias (no decay)
                    g[2].append(param)
                elif isinstance(module, bn) or "logit_scale" in fullname:  # weight (no decay)
                    # ContrastiveHead and BNContrastiveHead included here with 'logit_scale'
                    g[1].append(param)
                else:  # weight (with decay)
                    g[0].append(param)

        # Teacher parameters join the same groups so their lr/momentum stay in sync
        if teacher is not None:
            for v in teacher.modules():
                if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
                    g[2].append(v.bias)
                if isinstance(v, bn):  # weight (no decay)
                    g[1].append(v.weight)
                elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
                    g[0].append(v.weight)

        optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}
        name = {x.lower(): x for x in optimizers}.get(name.lower())
        if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
            optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
        elif name == "RMSProp":
            optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
        elif name == "SGD":
            optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
        else:
            raise NotImplementedError(
                f"Optimizer '{name}' not found in list of available optimizers {optimizers}. "
                "Request support for additional optimizers at https://github.com/ultralytics/ultralytics."
            )

        optimizer.add_param_group({"params": g[0], "weight_decay": decay})  # add g0 with weight_decay
        optimizer.add_param_group({"params": g[1], "weight_decay": 0.0})  # add g1 (BatchNorm2d weights)
        LOGGER.info(
            f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
            f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)"
        )
        return optimizer
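

# Hedged usage sketch (not part of this file): the distillation path is driven by
# the 'teacher' and 'distillation_loss' overrides consumed in BaseTrainer.__init__;
# a task trainer subclass such as ultralytics.models.yolo.detect.DetectionTrainer
# is assumed here, and the teacher checkpoint name is illustrative.
#
#   from ultralytics.models.yolo.detect import DetectionTrainer
#   teacher, _ = attempt_load_one_weight("yolo11l.pt")   # larger teacher checkpoint
#   trainer = DetectionTrainer(overrides={
#       "model": "yolo11n.pt", "data": "coco8.yaml", "epochs": 100, "batch": 16,
#       "teacher": teacher, "distillation_loss": "cwd",
#   })
#   trainer.train()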