import os
import subprocess

import torch
import torch.multiprocessing as mp
from torch import distributed as dist


def init_dist(launcher, backend='nccl', **kwargs):
    # Ensure the 'spawn' start method so CUDA works correctly in subprocesses.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}')
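

# Example usage (sketch): a launcher such as torchrun sets RANK, WORLD_SIZE
# and MASTER_ADDR/MASTER_PORT in the environment, after which a training
# script (hypothetical `train.py`) would call:
#
#   torchrun --nproc_per_node=4 train.py
#   ...
#   init_dist('pytorch', backend='nccl')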


def _init_dist_pytorch(backend, **kwargs):
    rank = int(os.environ['RANK'])
    # Bind each process to one GPU by its global rank, assuming processes
    # on a node receive consecutive ranks.
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)


def _init_dist_mpi(backend, **kwargs):
    # Open MPI exposes the global rank via OMPI_COMM_WORLD_RANK.
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
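

# Example launch (sketch; assumes MASTER_ADDR/MASTER_PORT are exported and
# that the training script forwards a hypothetical --launcher flag):
#
#   mpirun -np 8 python train.py --launcher mpi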


def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.

    If the argument ``port`` is not specified, the master port is taken from
    the system environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is
    not set either, the default port ``29500`` is used.

    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    # Use the first hostname in the node list as the master address.
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # Specify the master port: an explicit argument wins, then the
    # MASTER_PORT environment variable, then the torch.distributed default.
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass
    else:
        # 29500 is the torch.distributed default port.
        os.environ['MASTER_PORT'] = '29500'
    # Use MASTER_ADDR from the environment if it is already set.
    if 'MASTER_ADDR' not in os.environ:
        os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
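

# Example launch (sketch; exact flags depend on the cluster, and the
# --launcher flag of train.py is hypothetical):
#
#   srun -p <partition> --ntasks=8 --ntasks-per-node=8 --gres=gpu:8 \
#       python train.py --launcher slurm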


def get_dist_info():
    """Return the ``(rank, world_size)`` of the current process.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or not
    yet initialized, so callers work in both single- and multi-process runs.
    """
    if dist.is_available():
        initialized = dist.is_initialized()
    else:
        initialized = False
    if initialized:
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    else:
        rank = 0
        world_size = 1
    return rank, world_size
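

# Typical usage (sketch): gate rank-0-only work such as logging or
# checkpointing.
#
#   rank, world_size = get_dist_info()
#   if rank == 0:
#       print(f'Initialized {world_size} processes')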


def setup_for_distributed(is_master):
    """Disable printing when not in the master process.

    Non-master processes can still print by passing ``force=True``.
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Pop the custom 'force' flag so it is not passed to the real print.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # Replace the global print with the rank-aware wrapper.
    __builtin__.print = print
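

# Typical usage (sketch): silence non-master prints right after init_dist,
# while still allowing urgent messages via print(..., force=True).
#
#   init_dist('pytorch')
#   rank, _ = get_dist_info()
#   setup_for_distributed(rank == 0)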