File size: 1,949 Bytes
7393a38 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 | import os
import pdb
import torch
import torch.nn as nn
class GpuDataParallel(object):
    """Manage GPU selection and move models/data onto the chosen device(s).

    Typical usage: call ``set_device("0,1")`` once, then route the model
    through ``model_to_device`` and each batch through ``data_to_device``.
    """

    def __init__(self):
        # Logical GPU indices (0..n-1 after CUDA_VISIBLE_DEVICES remapping).
        self.gpu_list = []
        # Target device: an int GPU index, or the string "cpu" when no GPU
        # is configured. None until set_device() has been called.
        self.output_device = None

    def set_device(self, device):
        """Configure the visible GPUs from a device spec.

        Args:
            device: comma-separated GPU ids (e.g. "0" or "0,1"), or
                None / the string 'None' to run on CPU.
        """
        device = str(device)
        if device != 'None':
            # Once CUDA_VISIBLE_DEVICES is set, the visible GPUs are
            # renumbered 0..n-1, so logical indices are just range(n).
            self.gpu_list = list(range(len(device.split(','))))
            os.environ["CUDA_VISIBLE_DEVICES"] = device
            self.occupy_gpu(self.gpu_list)
        self.output_device = self.gpu_list[0] if self.gpu_list else "cpu"

    def model_to_device(self, model):
        """Move ``model`` to the output device; wrap in DataParallel for multi-GPU."""
        # model = convert_model(model)
        model = model.to(self.output_device)
        if len(self.gpu_list) > 1:
            model = nn.DataParallel(
                model,
                device_ids=self.gpu_list,
                output_device=self.output_device)
        return model

    def data_to_device(self, data):
        """Recursively move a tensor (or list/tuple of tensors) to the output device.

        float64 is down-cast to float32 and uint8 is up-cast to int64 to
        match the training pipeline; float32 and int64 pass through as-is.

        Raises:
            ValueError: for tensors of any other dtype.
        """
        if isinstance(data, (list, tuple)):
            # NOTE: tuples come back as lists, matching the original behavior.
            return [self.data_to_device(d) for d in data]
        # Dispatch on dtype instead of the deprecated CPU-only tensor-type
        # classes (isinstance(x, torch.FloatTensor) is False for CUDA
        # tensors, which made already-on-GPU tensors raise ValueError).
        if data.dtype in (torch.float32, torch.int64):
            return data.to(self.output_device)
        if data.dtype == torch.float64:
            return data.float().to(self.output_device)
        if data.dtype == torch.uint8:
            return data.long().to(self.output_device)
        raise ValueError(
            "Unknown Dtype: {} (shape {})".format(data.dtype, tuple(data.shape)))

    def criterion_to_device(self, loss):
        """Move a loss module/tensor to the output device."""
        return loss.to(self.output_device)

    def occupy_gpu(self, gpus=None):
        """
        make program appear on nvidia-smi.

        Args:
            gpus: None, a single GPU index, or an iterable of indices.
                With None or an empty iterable, touches the default GPU.
                (The original crashed on the default: len(None) raised
                TypeError.)
        """
        if gpus is None:
            gpus = []
        elif isinstance(gpus, int):
            gpus = [gpus]
        else:
            gpus = list(gpus)
        if gpus:
            for g in gpus:
                # A tiny allocation per GPU is enough to register the
                # process with the driver (and hence nvidia-smi).
                torch.zeros(1).cuda(g)
        else:
            torch.zeros(1).cuda()
|