import datetime
import errno
import os
import time
from collections import defaultdict, deque

import torch
import torch.distributed as dist


class SmoothedValue:
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        # The statistics are properties, so they can be passed to str.format
        # as plain values rather than as bound methods.
        return self.fmt.format(
            median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
        )
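
# A minimal usage sketch (not part of the original file; values are illustrative):
#
#     meter = SmoothedValue(window_size=5)
#     for loss in (0.9, 0.7, 0.65, 0.6):
#         meter.update(loss)
#     print(meter)  # prints something like "0.6500 (0.7125)"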


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    data_list = [None] * world_size
    dist.all_gather_object(data_list, data)
    return data_list
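
# Usage sketch (assumption: a typical evaluation loop; `preds` is a
# hypothetical per-rank result object):
#
#     preds = {"image_id": 1, "boxes": [...]}
#     merged = all_gather(preds)  # list with one entry per rank, identical on every rank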


def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.

    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.inference_mode():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
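
# Sketch with illustrative values: if two processes hold {"loss": tensor(2.0)}
# and {"loss": tensor(4.0)}, after reduce_dict(..., average=True) every rank
# holds {"loss": tensor(3.0)}. The values must be tensors on the device the
# backend expects (e.g. CUDA tensors for NCCL).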


class MetricLogger:
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(f"{name}: {str(meter)}")
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)")


def collate_fn(batch):
    return tuple(zip(*batch))
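
# Sketch of what collate_fn does: with batch = [(img1, tgt1), (img2, tgt2)] it
# returns ((img1, img2), (tgt1, tgt2)), so images of different sizes stay in a
# tuple instead of being stacked into one tensor, as detection models typically
# require.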


def mkdir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def setup_for_distributed(is_master):
    """
    This function disables printing when not in the master process.
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    # Replace the built-in print so that non-master processes stay silent
    # unless the caller passes force=True.
    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print("Not using distributed mode")
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
    )
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
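
# Launch sketch (assumptions: single node with 4 GPUs; a hypothetical train.py
# that exposes args.dist_url via a --dist-url flag). torchrun sets RANK,
# WORLD_SIZE and LOCAL_RANK, so the first branch above is taken and the
# init_method can simply be "env://":
#
#     torchrun --nproc_per_node=4 train.py --dist-url env://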