"""Utility helpers for the training scripts: smoothed metric tracking, confusion-matrix
accumulation, console logging, batch collation, and distributed (DDP) setup."""
import datetime
import errno
import os
import time
from collections import defaultdict, deque

import torch
import torch.distributed as dist


class SmoothedValue:
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        # The deque keeps only the last `window_size` values; total/count cover the full series.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        t = reduce_across_processes([self.count, self.total])
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
        )


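# Illustrative usage of SmoothedValue (a minimal sketch; the meter name and values below are
# hypothetical, not part of this module):
#
#     >>> loss_meter = SmoothedValue(window_size=20, fmt="{median:.4f} ({global_avg:.4f})")
#     >>> for step_loss in (0.9, 0.7, 0.65):
#     ...     loss_meter.update(step_loss)
#     >>> str(loss_meter)
#     '0.7000 (0.7500)'

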
class ConfusionMatrix:
    """Accumulate a (num_classes x num_classes) confusion matrix: rows are indexed by the
    labels in ``a`` (typically ground truth), columns by the labels in ``b`` (typically predictions)."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = None

    def update(self, a, b):
        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
        with torch.inference_mode():
            k = (a >= 0) & (a < n)  # drop out-of-range labels such as an ignore index (e.g. 255)
            inds = n * a[k].to(torch.int64) + b[k]
            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)

    def reset(self):
        self.mat.zero_()

    def compute(self):
        h = self.mat.float()
        acc_global = torch.diag(h).sum() / h.sum()
        acc = torch.diag(h) / h.sum(1)
        iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
        return acc_global, acc, iu

    def reduce_from_all_processes(self):
        # Keep the reduced matrix: reduce_across_processes returns a new tensor.
        self.mat = reduce_across_processes(self.mat)

    def __str__(self):
        acc_global, acc, iu = self.compute()
        return ("global correct: {:.1f}\naverage row correct: {}\nIoU: {}\nmean IoU: {:.1f}").format(
            acc_global.item() * 100,
            [f"{i:.1f}" for i in (acc * 100).tolist()],
            [f"{i:.1f}" for i in (iu * 100).tolist()],
            iu.mean().item() * 100,
        )


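# Illustrative usage of ConfusionMatrix (a minimal sketch with made-up labels):
#
#     >>> confmat = ConfusionMatrix(num_classes=2)
#     >>> target = torch.tensor([0, 0, 1, 1])
#     >>> prediction = torch.tensor([0, 1, 1, 1])
#     >>> confmat.update(target.flatten(), prediction.flatten())
#     >>> acc_global, acc, iu = confmat.compute()
#     >>> acc_global.item()  # 3 of 4 labels correct
#     0.75

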
class MetricLogger:
    """Collect named SmoothedValue meters and print them at a regular interval while iterating."""

    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            if not isinstance(v, (float, int)):
                raise TypeError(
                    f"This method expects the value of the input arguments to be of type float or int, instead got {type(v)}"
                )
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow attribute-style access to meters, e.g. `logger.loss.global_avg`.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(f"{name}: {str(meter)}")
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # data_time measures fetching the batch; iter_time measures the full iteration
            # (fetch plus whatever the caller does before resuming this generator).
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(f"{header} Total time: {total_time_str}")


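# Illustrative training-loop usage of MetricLogger (a minimal sketch; `data_loader`, `model`,
# and `criterion` are placeholders for the caller's objects):
#
#     >>> metric_logger = MetricLogger(delimiter="  ")
#     >>> metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value}"))
#     >>> for image, target in metric_logger.log_every(data_loader, print_freq=10, header="Epoch: [0]"):
#     ...     loss = criterion(model(image), target)
#     ...     metric_logger.update(loss=loss.item(), lr=0.01)

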
def cat_list(images, fill_value=0):
    # Pad every tensor to the largest spatial size in the batch, then stack along a new batch dim.
    max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
    batch_shape = (len(images),) + max_size
    batched_imgs = images[0].new_full(batch_shape, fill_value)
    for img, pad_img in zip(images, batched_imgs):
        pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
    return batched_imgs


def collate_fn(batch):
    # Collate variable-size (image, target) pairs; padded target pixels get 255, which
    # segmentation losses typically treat as an ignore index.
    images, targets = list(zip(*batch))
    batched_imgs = cat_list(images, fill_value=0)
    batched_targets = cat_list(targets, fill_value=255)
    return batched_imgs, batched_targets


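# Illustrative DataLoader wiring for collate_fn (a minimal sketch; `dataset` is a placeholder
# for a dataset that yields (image_tensor, target_tensor) pairs):
#
#     >>> data_loader = torch.utils.data.DataLoader(
#     ...     dataset, batch_size=8, shuffle=True, collate_fn=collate_fn
#     ... )

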
def mkdir(path):
    # Equivalent to os.makedirs(path, exist_ok=True): ignore the error if the directory exists.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def setup_for_distributed(is_master):
    """
    Disable printing on processes that are not the master process.
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Passing force=True lets non-master processes print when explicitly requested.
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    # Only the main process writes checkpoints, avoiding concurrent writes of the same file.
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    # Pick up rank/world size from torchrun (RANK/WORLD_SIZE/LOCAL_RANK) or from SLURM,
    # otherwise fall back to single-process (non-distributed) mode.
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        pass
    else:
        print("Not using distributed mode")
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
    )
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)


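# Illustrative launch for the environment-variable path above (a sketch; `train.py` and the
# `--dist-url` flag are placeholders for the caller's script and arguments). torchrun sets
# RANK, WORLD_SIZE and LOCAL_RANK for every worker:
#
#     torchrun --nproc_per_node=8 train.py --dist-url env://

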
def reduce_across_processes(val):
    if not is_dist_avail_and_initialized():
        # nothing to reduce, but still return a tensor for a consistent interface
        return torch.tensor(val)

    # Tensors are cloned and moved to the current CUDA device; plain Python values are wrapped.
    t = val.clone().detach().to("cuda") if isinstance(val, torch.Tensor) else torch.tensor(val, device="cuda")
    dist.barrier()
    dist.all_reduce(t)
    return t


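# Illustrative use of reduce_across_processes (a sketch; `local_correct` and `local_total`
# are placeholder per-rank counts). Every rank must call this collectively:
#
#     >>> correct = reduce_across_processes(local_correct)
#     >>> total = reduce_across_processes(local_total)
#     >>> accuracy = (correct / total).item()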