import gc
import random
import logging
import os
import sys

import numpy as np
import torch
import torch.distributed as dist
# from loguru import logger

def init_dist():
"""Initializes distributed environment."""
rank = int(os.environ["RANK"])
num_gpus = torch.cuda.device_count()
local_rank = rank % num_gpus
torch.cuda.set_device(local_rank)
dist.init_process_group(backend="nccl")
return local_rank
def set_manual_seed(seed):
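    """Seed Python's random, NumPy, and PyTorch (CPU and all CUDA devices) RNGs."""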
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def make_contiguous(x):
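    """Recursively make tensors contiguous, descending into dicts; other values are returned unchanged."""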
    if isinstance(x, torch.Tensor):
        return x.contiguous()
    elif isinstance(x, dict):
        return {k: make_contiguous(v) for k, v in x.items()}
    else:
        return x

class set_worker_seed_builder():
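    """Picklable builder meant to be used as a DataLoader worker_init_fn: reseeds RNGs in each worker."""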
    def __init__(self, global_rank):
        self.global_rank = global_rank

    def __call__(self, worker_id):
        # Each DataLoader worker starts from a distinct torch.initial_seed();
        # reduce it to a 32-bit value (NumPy requires seeds < 2**32) and reseed all RNGs.
        set_manual_seed(torch.initial_seed() % (2 ** 32 - 1))

def free_memory():
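    """Collect Python garbage and release cached and IPC CUDA memory."""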
    if torch.cuda.is_available():
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

def set_logging(local_rank):
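    """Configure logging: local rank 0 logs INFO to stdout, all other ranks log only ERROR."""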
    if local_rank == 0:
        # Main process: INFO and above, formatted, to stdout.
        logging.basicConfig(
            level=logging.INFO,
            format="[%(asctime)s] %(levelname)s: %(message)s",
            handlers=[logging.StreamHandler(stream=sys.stdout)],
        )
    else:
        # Other ranks: only errors, to avoid duplicated log lines.
        logging.basicConfig(level=logging.ERROR)
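

# Minimal usage sketch (illustrative assumption, requires CUDA and a distributed launcher):
# torchrun sets the RANK environment variable that init_dist() reads, e.g.
#   torchrun --nproc_per_node=2 <this_module>.py
if __name__ == "__main__":
    local_rank = init_dist()
    set_logging(local_rank)
    # Offset the seed by rank so each process draws different random numbers.
    set_manual_seed(42 + dist.get_rank())
    logging.info("rank %d initialized on cuda:%d", dist.get_rank(), local_rank)
    free_memory()
    dist.destroy_process_group()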