code stringlengths 17 6.64M |
|---|
def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False):
    """Build one VGG stage.

    A stage is ``num_blocks`` repetitions of conv3x3 (+ BatchNorm when
    ``with_bn``) + ReLU, closed by a 2x2 max-pool.

    Returns:
        list[nn.Module]: The stage's modules as a flat list (not wrapped in
        an ``nn.Sequential``), so callers can index modules individually.
    """
    modules = []
    in_ch = inplanes
    for _ in range(num_blocks):
        unit = [conv3x3(in_ch, planes, dilation)]
        if with_bn:
            unit.append(nn.BatchNorm2d(planes))
        unit.append(nn.ReLU(inplace=True))
        modules.extend(unit)
        in_ch = planes
    modules.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return modules
|
class VGG(nn.Module):
    """VGG backbone.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_bn (bool): Use BatchNorm or not.
        num_classes (int): number of classes for classification; <= 0 means
            no classifier head is built.
        num_stages (int): VGG stages, normally 5.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
    """

    # Number of conv blocks per stage for each supported depth.
    arch_settings = {11: (1, 1, 2, 2, 2), 13: (2, 2, 2, 2, 2), 16: (2, 2, 3, 3, 3), 19: (2, 2, 4, 4, 4)}

    def __init__(self, depth, with_bn=False, num_classes=(- 1), num_stages=5, dilations=(1, 1, 1, 1, 1), out_indices=(0, 1, 2, 3, 4), frozen_stages=(- 1), bn_eval=True, bn_frozen=False, ceil_mode=False, with_last_pool=True):
        super(VGG, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError('invalid depth {} for vgg'.format(depth))
        assert ((num_stages >= 1) and (num_stages <= 5))
        stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        assert (len(dilations) == num_stages)
        # NOTE(review): `<=` permits out_index == num_stages, which `forward`
        # can never emit (stage indices run 0..num_stages-1) — looks like it
        # was meant to be `< num_stages`; confirm before tightening.
        assert (max(out_indices) <= num_stages)
        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        # Input channels (3-channel images).
        self.inplanes = 3
        start_idx = 0
        vgg_layers = []
        # range_sub_modules[i] = [start, end) indices into the flat
        # `features` Sequential that belong to stage i.
        self.range_sub_modules = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            # Each block contributes conv + ReLU (+ BN when with_bn);
            # the trailing +1 is the stage-closing max-pool.
            num_modules = ((num_blocks * (2 + with_bn)) + 1)
            end_idx = (start_idx + num_modules)
            dilation = dilations[i]
            # Channel widths double per stage (64, 128, 256, 512) and cap at 512.
            planes = ((64 * (2 ** i)) if (i < 4) else 512)
            vgg_layer = make_vgg_layer(self.inplanes, planes, num_blocks, dilation=dilation, with_bn=with_bn, ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if (not with_last_pool):
            # Drop the final max-pool and shrink the last stage's range to match.
            vgg_layers.pop((- 1))
            self.range_sub_modules[(- 1)][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))
        if (self.num_classes > 0):
            # Classifier head; the 512*7*7 input assumes a 7x7 final feature
            # map (i.e. 224x224 input with all 5 pools) — TODO confirm.
            self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))

    def init_weights(self, pretrained=None):
        """Initialize weights from a checkpoint path or from scratch.

        Args:
            pretrained (str | None): Checkpoint path/URI, or None for
                Kaiming/constant/normal initialization.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the backbone, collecting the stages listed in ``out_indices``
        (plus classifier scores when a head exists).

        Returns:
            Tensor | tuple[Tensor]: A single tensor when only one output is
            collected, otherwise a tuple of all collected outputs.
        """
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for (i, num_blocks) in enumerate(self.stage_blocks):
            # Apply every sub-module belonging to stage i.
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (self.num_classes > 0):
            x = x.view(x.size(0), (- 1))
            x = self.classifier(x)
            outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Switch train/eval mode, then re-apply BN freezing and stage
        freezing, which plain ``train(True)`` would otherwise undo.
        """
        super(VGG, self).train(mode)
        if self.bn_eval:
            # Keep BN in eval mode so running stats stay frozen during training.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if (mode and (self.frozen_stages >= 0)):
            # Freeze stages [0, frozen_stages): eval mode + no gradients.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
|
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``,
    skipping either attribute when it is absent or None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
|
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight`` (uniform or normal variant) and
    fill ``module.bias`` with a constant when present."""
    assert (distribution in ['uniform', 'normal'])
    init_fn = nn.init.xavier_uniform_ if distribution == 'uniform' else nn.init.xavier_normal_
    init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
|
def normal_init(module, mean=0, std=1, bias=0):
    """Fill ``module.weight`` from N(mean, std) and ``module.bias`` with a
    constant when present."""
    nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
|
def uniform_init(module, a=0, b=1, bias=0):
    """Fill ``module.weight`` from U(a, b) and ``module.bias`` with a
    constant when present."""
    nn.init.uniform_(module.weight, a, b)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
|
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'):
    """Kaiming-initialize ``module.weight`` (uniform or normal variant) and
    fill ``module.bias`` with a constant when present."""
    assert (distribution in ['uniform', 'normal'])
    init_fn = nn.init.kaiming_uniform_ if distribution == 'uniform' else nn.init.kaiming_normal_
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
|
def caffe2_xavier_init(module, bias=0):
    """Caffe2-style "XavierFill": Kaiming-uniform with fan_in and a=1.

    Fix: the ``bias`` argument was accepted but never forwarded, so the bias
    was silently left untouched; it is now passed through to ``kaiming_init``.
    """
    # Acknowledgment to FAIR's internal code: XavierFill in Caffe2 corresponds
    # to kaiming_uniform_ in PyTorch.
    kaiming_init(module, a=1, mode='fan_in', nonlinearity='leaky_relu', bias=bias, distribution='uniform')
|
def bias_init_with_prob(prior_prob):
    """Return the conv/fc bias value whose sigmoid equals ``prior_prob``,
    i.e. log(p / (1 - p))."""
    return float(-np.log((1 - prior_prob) / prior_prob))
|
class BaseFileHandler(metaclass=ABCMeta):
    """Abstract base for file handlers (json/yaml/pickle/...).

    Subclasses implement the three abstract methods; path-based I/O is
    provided here by opening the file and delegating to the fileobj methods.
    """

    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        # Deserialize from an open file-like object.
        pass

    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        # Serialize ``obj`` into an open file-like object.
        pass

    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        # Serialize ``obj`` to a string (or bytes, format-dependent).
        pass

    def load_from_path(self, filepath, mode='r', **kwargs):
        # Text mode by default; binary handlers override with mode='rb'.
        with open(filepath, mode) as f:
            return self.load_from_fileobj(f, **kwargs)

    def dump_to_path(self, obj, filepath, mode='w', **kwargs):
        # Text mode by default; binary handlers override with mode='wb'.
        with open(filepath, mode) as f:
            self.dump_to_fileobj(obj, f, **kwargs)
|
class JsonHandler(BaseFileHandler):
    """JSON file handler.

    Fix: ``load_from_fileobj`` now accepts ``**kwargs`` like the other two
    methods and the abstract base signature, so keyword arguments passed via
    ``load_from_path`` no longer raise TypeError.
    """

    def load_from_fileobj(self, file, **kwargs):
        return json.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        json.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        return json.dumps(obj, **kwargs)
|
class PickleHandler(BaseFileHandler):
    """Pickle file handler.

    Uses protocol 2 by default (overridable via ``protocol=``) and forces
    binary file modes for path-based I/O.
    """

    def load_from_fileobj(self, file, **kwargs):
        return pickle.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        kwargs.setdefault('protocol', 2)
        pickle.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        kwargs.setdefault('protocol', 2)
        return pickle.dumps(obj, **kwargs)

    def load_from_path(self, filepath, **kwargs):
        # Pickle requires binary mode.
        return super(PickleHandler, self).load_from_path(filepath, mode='rb', **kwargs)

    def dump_to_path(self, obj, filepath, **kwargs):
        # Pickle requires binary mode.
        super(PickleHandler, self).dump_to_path(obj, filepath, mode='wb', **kwargs)
|
class YamlHandler(BaseFileHandler):
    """YAML file handler.

    Defaults to the module-level ``Loader``/``Dumper`` (presumably the C
    variants when available) unless the caller supplies their own.
    """

    def load_from_fileobj(self, file, **kwargs):
        kwargs.setdefault('Loader', Loader)
        return yaml.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        kwargs.setdefault('Dumper', Dumper)
        yaml.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        kwargs.setdefault('Dumper', Dumper)
        return yaml.dump(obj, **kwargs)
|
def load(file, file_format=None, **kwargs):
    """Load data from json/yaml/pickle files.

    This method provides a unified api for loading data from serialized
    files.

    Args:
        file (str or :obj:`Path` or file-like object): Filename or a
            file-like object.
        file_format (str, optional): If not specified, the file format will
            be inferred from the file extension, otherwise use the specified
            one. Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".

    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        # Infer the format from the file extension.
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if is_str(file):
        return handler.load_from_path(file, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
|
def dump(obj, file=None, file_format=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    This method provides a unified api for dumping data as strings or to
    files, and also supports custom arguments for each file format.

    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, the object is dumped to a str, otherwise to the file
            given by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.

    Returns:
        str | None: The serialized string when ``file`` is None, otherwise
        None (the data is written out).
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError('file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
|
def _register_handler(handler, file_formats):
    """Register a handler for some file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats to be handled by this
            handler.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError('handler must be a child of BaseFileHandler, not {}'.format(type(handler)))
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    # One handler instance may serve several extensions (e.g. yaml/yml).
    for fmt in file_formats:
        file_handlers[fmt] = handler
|
def register_handler(file_formats, **kwargs):
    """Class decorator: instantiate the decorated handler class with
    ``**kwargs`` and register it for ``file_formats``."""
    def decorator(cls):
        _register_handler(cls(**kwargs), file_formats)
        return cls
    return decorator
|
def list_from_file(filename, prefix='', offset=0, max_num=0):
    """Load a text file and parse the content as a list of strings.

    Args:
        filename (str): Filename.
        prefix (str): The prefix to be inserted at the beginning of each item.
        offset (int): Number of leading lines to skip.
        max_num (int): The maximum number of lines to be read;
            zero and negatives mean no limitation.

    Returns:
        list[str]: A list of strings.
    """
    items = []
    with open(filename, 'r') as f:
        # Skip the first `offset` lines.
        for _ in range(offset):
            f.readline()
        for line in f:
            if max_num > 0 and len(items) >= max_num:
                break
            items.append(prefix + line.rstrip('\n'))
    return items
|
def dict_from_file(filename, key_type=str):
    """Load a text file and parse the content as a dict.

    Each line must have two or more whitespace/tab separated columns. The
    first column becomes the key; a single remaining column becomes a scalar
    value, several remaining columns become a list.

    Args:
        filename (str): Filename.
        key_type (type): Type of the dict's keys. str is used by default and
            type conversion is performed if specified.

    Returns:
        dict: The parsed contents.
    """
    mapping = {}
    with open(filename, 'r') as f:
        for line in f:
            columns = line.rstrip('\n').split()
            assert len(columns) >= 2
            value = columns[1] if len(columns) == 2 else columns[1:]
            mapping[key_type(columns[0])] = value
    return mapping
|
def solarize(img, thr=128):
    """Solarize an image (invert all pixel values at or above a threshold).

    Args:
        img (ndarray): Image to be solarized.
        thr (int): Threshold for solarizing (0 - 255).

    Returns:
        ndarray: The solarized image.
    """
    return np.where(img < thr, img, 255 - img)
|
def posterize(img, bits):
    """Posterize an image (reduce the number of bits per color channel).

    Args:
        img (ndarray): Image to be posterized.
        bits (int): Number of bits (1 to 8) to keep.

    Returns:
        ndarray: The posterized image.
    """
    shift = 8 - bits
    # Shifting right then left zeroes the low `shift` bits.
    return np.left_shift(np.right_shift(img, shift), shift)
|
def iminvert(img):
    """Invert (negate) an image.

    Args:
        img (ndarray): Image to be inverted.

    Returns:
        ndarray: The inverted image (255 - pixel, same dtype as input).
    """
    white = np.full_like(img, 255)
    return white - img
|
def bgr2gray(img, keepdim=False):
    """Convert a BGR image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), return the grayscale image
            with 2 dims, otherwise 3 dims (trailing singleton channel).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray[..., None] if keepdim else gray
|
def rgb2gray(img, keepdim=False):
    """Convert a RGB image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), return the grayscale image
            with 2 dims, otherwise 3 dims (trailing singleton channel).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray[..., None] if keepdim else gray
|
def gray2bgr(img):
    """Convert a grayscale image to BGR image.

    Args:
        img (ndarray): The input image (2-dim, or 3-dim with one channel).

    Returns:
        ndarray: The converted BGR image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
def gray2rgb(img):
    """Convert a grayscale image to RGB image.

    Args:
        img (ndarray): The input image (2-dim, or 3-dim with one channel).

    Returns:
        ndarray: The converted RGB image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
|
def convert_color_factory(src, dst):
    """Build a color-conversion function from ``src`` to ``dst`` color space,
    looking up the matching ``cv2.COLOR_*`` code by name."""
    code = getattr(cv2, 'COLOR_{}2{}'.format(src.upper(), dst.upper()))

    def convert_color(img):
        return cv2.cvtColor(img, code)

    convert_color.__doc__ = 'Convert a {0} image to {1} image.\n\n    Args:\n        img (ndarray or str): The input image.\n\n    Returns:\n        ndarray: The converted {1} image.\n    '.format(src.upper(), dst.upper())
    return convert_color
|
def imnormalize(img, mean, std, to_rgb=True):
    """Normalize an image with mean and std (out of place).

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image (a new float32 array; the input is
        never modified).
    """
    if img.dtype != np.float32:
        img = np.float32(img)
    else:
        # Already float32: copy so the in-place helper cannot mutate the input.
        img = img.copy()
    return imnormalize_(img, mean, std, to_rgb)
|
def imnormalize_(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized (modified in place; must be a
            float dtype, not uint8).
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert BGR to RGB first.

    Returns:
        ndarray: The normalized image (same array as ``img``).
    """
    # cv2 in-place ops below do not support uint8 destinations here.
    assert img.dtype != np.uint8
    mean = np.float64(mean.reshape(1, -1))
    inv_std = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
    cv2.subtract(img, mean, img)
    # Multiply by reciprocal instead of dividing.
    cv2.multiply(img, inv_std, img)
    return img
|
def imdenormalize(img, mean, std, to_bgr=True):
    """Undo :func:`imnormalize`: multiply by std, add mean, and optionally
    convert RGB back to BGR.

    Returns:
        ndarray: The denormalized image (new array for the multiply step,
        then modified in place by the add/convert steps).
    """
    assert img.dtype != np.uint8
    mean = mean.reshape(1, -1).astype(np.float64)
    std = std.reshape(1, -1).astype(np.float64)
    img = cv2.multiply(img, std)
    cv2.add(img, mean, img)
    if to_bgr:
        cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img)
    return img
|
def _scale_size(size, scale):
'Rescale a size by a ratio.\n\n Args:\n size (tuple): w, h.\n scale (float): Scaling factor.\n\n Returns:\n tuple[int]: scaled size.\n '
(w, h) = size
return (int(((w * float(scale)) + 0.5)), int(((h * float(scale)) + 0.5)))
|
def imresize(img, size, return_scale=False, interpolation='bilinear', out=None):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple): Target (w, h).
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos".
        out (ndarray): The output destination.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
        `resized_img`.
    """
    h, w = img.shape[:2]
    resized = cv2.resize(img, size, dst=out, interpolation=interp_codes[interpolation])
    if return_scale:
        return resized, size[0] / w, size[1] / h
    return resized
|
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
    """Resize image to the same size as a given image.

    Args:
        img (ndarray): The input image.
        dst_img (ndarray): The target image whose (h, w) is used.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Same as :func:`resize`.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
        `resized_img`.
    """
    target_h, target_w = dst_img.shape[:2]
    return imresize(img, (target_w, target_h), return_scale, interpolation)
|
def rescale_size(old_size, scale, return_scale=False):
    """Calculate the new size to be rescaled to.

    Args:
        old_size (tuple[int]): The old size (w, h) of image.
        scale (float or tuple[int]): The scaling factor or maximum size.
            If it is a number, the image size is multiplied by it; if it is
            a tuple of 2 integers, the image is rescaled as large as
            possible while its long/short edges stay within the tuple's
            max/min values.
        return_scale (bool): Whether to also return the scaling factor.

    Returns:
        tuple[int] | tuple[tuple[int], float]: The new size, optionally with
        the scale factor.
    """
    w, h = old_size
    if isinstance(scale, (float, int)):
        if scale <= 0:
            raise ValueError('Invalid scale {}, must be positive.'.format(scale))
        scale_factor = scale
    elif isinstance(scale, tuple):
        # Fit the long edge under max(scale) and the short edge under min(scale).
        max_long_edge = max(scale)
        max_short_edge = min(scale)
        scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
    else:
        raise TypeError('Scale must be a number or tuple of int, but got {}'.format(type(scale)))
    # Round-half-up each dimension (inlined _scale_size).
    new_size = (int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5))
    if return_scale:
        return new_size, scale_factor
    return new_size
|
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
    """Resize image while keeping the aspect ratio.

    Args:
        img (ndarray): The input image.
        scale (float or tuple[int]): The scaling factor or maximum size
            (see :func:`rescale_size`).
        return_scale (bool): Whether to also return the scaling factor.
        interpolation (str): Same as :func:`resize`.

    Returns:
        ndarray | tuple: The rescaled image, optionally with the scale factor.
    """
    h, w = img.shape[:2]
    new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
    rescaled = imresize(img, new_size, interpolation=interpolation)
    if return_scale:
        return rescaled, scale_factor
    return rescaled
|
def scatter(input, devices, streams=None):
    """Scatters tensor across multiple GPUs.

    Args:
        input (list | torch.Tensor): Data to move. Lists are split into
            contiguous chunks, one chunk per device, recursively.
        devices (list[int]): Target GPU ids.
        streams (list | None): Optional CUDA streams (one per device) on
            which the host-to-device copies are issued.

    Returns:
        list | torch.Tensor: Same nesting as ``input`` with tensors copied
        to the target GPUs.
    """
    if (streams is None):
        streams = ([None] * len(devices))
    if isinstance(input, list):
        # Ceil division: every device gets at most chunk_size items.
        chunk_size = (((len(input) - 1) // len(devices)) + 1)
        outputs = [scatter(input[i], [devices[(i // chunk_size)]], [streams[(i // chunk_size)]]) for i in range(len(input))]
        return outputs
    elif isinstance(input, torch.Tensor):
        output = input.contiguous()
        # Empty tensors are copied on the default stream (stream=None).
        stream = (streams[0] if (output.numel() > 0) else None)
        with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
            output = output.cuda(devices[0], non_blocking=True)
        return output
    else:
        raise Exception('Unknown type {}.'.format(type(input)))
|
def synchronize_stream(output, devices, streams):
    """Make each device's main stream wait on the copy stream used by
    :func:`scatter`, so the scattered tensors are safe to consume.

    ``output``/``devices``/``streams`` mirror the structure produced by
    :func:`scatter`: lists are walked recursively, chunk by chunk.
    """
    if isinstance(output, list):
        chunk_size = (len(output) // len(devices))
        for i in range(len(devices)):
            for j in range(chunk_size):
                synchronize_stream(output[((i * chunk_size) + j)], [devices[i]], [streams[i]])
    elif isinstance(output, torch.Tensor):
        # Empty tensors were copied on the default stream; nothing to sync.
        if (output.numel() != 0):
            with torch.cuda.device(devices[0]):
                main_stream = torch.cuda.current_stream()
                main_stream.wait_stream(streams[0])
                # Tie the tensor's memory lifetime to the main stream.
                output.record_stream(main_stream)
    else:
        raise Exception('Unknown type {}.'.format(type(output)))
|
def get_input_device(input):
    """Return the CUDA device index of the first CUDA tensor found in
    ``input`` (a tensor or arbitrarily nested list of tensors), or -1 when
    everything lives on CPU."""
    if isinstance(input, torch.Tensor):
        return input.get_device() if input.is_cuda else -1
    if isinstance(input, list):
        for item in input:
            device = get_input_device(item)
            if device != -1:
                return device
        return -1
    raise Exception('Unknown type {}.'.format(type(input)))
|
class Scatter(object):
    """Scatter helper that copies CPU inputs to multiple GPUs using
    per-device copy streams."""

    @staticmethod
    def forward(target_gpus, input):
        input_device = get_input_device(input)
        streams = None
        if (input_device == (- 1)):
            # Input is on CPU: use a dedicated copy stream per target GPU.
            streams = [_get_stream(device) for device in target_gpus]
        outputs = scatter(input, target_gpus, streams)
        if (streams is not None):
            # Block the main streams until the async copies have finished.
            synchronize_stream(outputs, target_gpus, streams)
        return tuple(outputs)
|
def collate(batch, samples_per_gpu=1):
    """Puts each data field into a tensor/DataContainer with outer dimension
    batch size.

    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.

    1. cpu_only = True, e.g., meta data
    2. cpu_only = False, stack = True, e.g., images tensors
    3. cpu_only = False, stack = False, e.g., gt bboxes

    Fix: use ``collections.abc.Sequence``/``Mapping`` — the aliases under
    ``collections`` were removed in Python 3.10.
    """
    if not isinstance(batch, collections.abc.Sequence):
        raise TypeError('{} is not supported.'.format(batch.dtype))
    if isinstance(batch[0], DataContainer):
        assert (len(batch) % samples_per_gpu) == 0
        stacked = []
        if batch[0].cpu_only:
            # Case 1: keep data on CPU, just regroup samples per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
            return DataContainer(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
        elif batch[0].stack:
            # Case 2: pad each per-GPU group's trailing `pad_dims` dims to a
            # common shape, then stack with default_collate.
            for i in range(0, len(batch), samples_per_gpu):
                assert isinstance(batch[i].data, torch.Tensor)
                if batch[i].pad_dims is not None:
                    ndim = batch[i].dim()
                    assert ndim > batch[i].pad_dims
                    max_shape = [0 for _ in range(batch[i].pad_dims)]
                    for dim in range(1, batch[i].pad_dims + 1):
                        max_shape[dim - 1] = batch[i].size(-dim)
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # Leading (non-padded) dims must match exactly.
                        for dim in range(0, ndim - batch[i].pad_dims):
                            assert batch[i].size(dim) == sample.size(dim)
                        for dim in range(1, batch[i].pad_dims + 1):
                            max_shape[dim - 1] = max(max_shape[dim - 1], sample.size(-dim))
                    padded_samples = []
                    for sample in batch[i:(i + samples_per_gpu)]:
                        # F.pad takes (left, right) pairs from the last dim
                        # backwards; only the "right" entries are non-zero.
                        pad = [0 for _ in range(batch[i].pad_dims * 2)]
                        for dim in range(1, batch[i].pad_dims + 1):
                            pad[(2 * dim) - 1] = max_shape[dim - 1] - sample.size(-dim)
                        padded_samples.append(F.pad(sample.data, pad, value=sample.padding_value))
                    stacked.append(default_collate(padded_samples))
                elif batch[i].pad_dims is None:
                    stacked.append(default_collate([sample.data for sample in batch[i:(i + samples_per_gpu)]]))
                else:
                    raise ValueError('pad_dims should be either None or integers (1-3)')
        else:
            # Case 3: no stacking; regroup raw tensors/objects per GPU.
            for i in range(0, len(batch), samples_per_gpu):
                stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]}
    else:
        return default_collate(batch)
|
def assert_tensor_type(func):
    """Decorator for methods whose receiver's ``.data`` must be a
    ``torch.Tensor``; raises AttributeError otherwise."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if isinstance(args[0].data, torch.Tensor):
            return func(*args, **kwargs)
        raise AttributeError('{} has no attribute {} for type {}'.format(args[0].__class__.__name__, func.__name__, args[0].datatype))
    return wrapper
|
class DataContainer(object):
    """A container for any type of objects.

    Typically tensors will be stacked in the collate function and sliced along
    some dimension in the scatter function. This behavior has some limitations.
    1. All tensors have to be the same size.
    2. Types are limited (numpy array or Tensor).

    We design `DataContainer` and `MMDataParallel` to overcome these
    limitations. The behavior can be either of the following.

    - copy to GPU, pad all tensors to the same size and stack them
    - copy to GPU without stacking
    - leave the objects as is and pass it to the model
    - pad_dims specifies the number of last few dimensions to do padding
    """

    def __init__(self, data, stack=False, padding_value=0, cpu_only=False, pad_dims=2):
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value
        # pad_dims: how many trailing dims collate pads; None disables padding.
        assert (pad_dims in [None, 1, 2, 3])
        self._pad_dims = pad_dims

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, repr(self.data))

    @property
    def data(self):
        # The wrapped payload (tensor, ndarray, dict, ...).
        return self._data

    @property
    def datatype(self):
        # torch type string for tensors, the python type otherwise.
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        else:
            return type(self.data)

    @property
    def cpu_only(self):
        return self._cpu_only

    @property
    def stack(self):
        return self._stack

    @property
    def padding_value(self):
        return self._padding_value

    @property
    def pad_dims(self):
        return self._pad_dims

    @assert_tensor_type
    def size(self, *args, **kwargs):
        # Only valid for tensor payloads (enforced by the decorator).
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        # Only valid for tensor payloads (enforced by the decorator).
        return self.data.dim()
|
class MMDataParallel(DataParallel):
    """DataParallel whose scatter understands DataContainer inputs (delegates
    to :func:`scatter_kwargs`)."""

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
|
class MMDistributedDataParallel(DistributedDataParallel):
    """DistributedDataParallel whose scatter understands DataContainer inputs.

    NOTE(review): a second class with this exact name is defined later in the
    file and shadows this one at import time — confirm which definition is
    actually intended to be exported.
    """

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
|
class MMDistributedDataParallel(nn.Module):
    """Hand-rolled distributed wrapper: broadcasts the module's state from
    rank 0 at construction and scatters DataContainer-aware inputs to the
    current device in ``forward``. Gradient reduction is not performed here.
    """

    def __init__(self, module, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers
        # Bucket size in bytes for coalesced broadcasts.
        self.broadcast_bucket_size = ((bucket_cap_mb * 1024) * 1024)
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        # Broadcast from rank 0 in flattened buckets to amortize launch cost,
        # then copy the synced values back into the original tensors.
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for (tensor, synced) in zip(tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Make every rank start from rank 0's parameters (and optionally
        buffers)."""
        module_states = list(self.module.state_dict().values())
        if (len(module_states) > 0):
            self._dist_broadcast_coalesced(module_states, self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # _all_buffers() existed before torch 1.0; buffers() replaced it.
            # NOTE(review): lexicographic version comparison — confirm it is
            # safe for the torch versions this code targets.
            if (torch.__version__ < '1.0'):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if (len(buffers) > 0):
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # Single GPU per process: scatter only to the current device.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])
|
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.

    NOTE(review): this definition shadows the earlier module-level
    ``scatter(input, devices, streams)`` defined above — confirm the earlier
    one is only reached through ``Scatter.forward``.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                # CPU-only containers are passed through unscattered.
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        # Containers are scattered element-wise, then transposed so each GPU
        # receives one container of its own slices.
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if (isinstance(obj, dict) and (len(obj) > 0)):
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Non-container leaves are replicated once per GPU.
        return [obj for targets in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # Break the reference cycle scatter_map -> closure -> scatter_map.
        scatter_map = None
|
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary.

    Scatters positional and keyword arguments separately, then pads the
    shorter of the two with empty tuples/dicts so both have one entry per
    target GPU.
    """
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    diff = len(scattered_kwargs) - len(scattered_inputs)
    if diff > 0:
        scattered_inputs += [() for _ in range(diff)]
    elif diff < 0:
        scattered_kwargs += [{} for _ in range(-diff)]
    return tuple(scattered_inputs), tuple(scattered_kwargs)
|
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []
    metadata = getattr(state_dict, '_metadata', None)
    # Shallow copy so _metadata can be attached without mutating the caller's dict.
    state_dict = state_dict.copy()
    if (metadata is not None):
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively invoke _load_from_state_dict on the module and all its
        # children, accumulating missing/unexpected keys and error messages.
        local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
        module._load_from_state_dict(state_dict, prefix, local_metadata, True, all_missing_keys, unexpected_keys, err_msg)
        for (name, child) in module._modules.items():
            if (child is not None):
                load(child, ((prefix + name) + '.'))
    load(module)
    # Break the recursive closure's reference cycle.
    load = None
    # num_batches_tracked is created automatically by BN layers; its absence
    # in a checkpoint is not reported as an error.
    missing_keys = [key for key in all_missing_keys if ('num_batches_tracked' not in key)]
    if unexpected_keys:
        err_msg.append('unexpected key in source state_dict: {}\n'.format(', '.join(unexpected_keys)))
    if missing_keys:
        err_msg.append('missing keys in source state_dict: {}\n'.format(', '.join(missing_keys)))
    (rank, _) = get_dist_info()
    # Only rank 0 reports, to avoid duplicated output in distributed runs.
    if ((len(err_msg) > 0) and (rank == 0)):
        err_msg.insert(0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif (logger is not None):
            logger.warning(err_msg)
        else:
            print(err_msg)
|
def load_url_dist(url):
    """In distributed setting, this function only downloads the checkpoint at
    local rank 0; other ranks wait at a barrier, then load it from the
    (now warm) model-zoo cache.
    """
    (rank, world_size) = get_dist_info()
    # Prefer the per-node LOCAL_RANK when launched by a distributed launcher.
    rank = int(os.environ.get('LOCAL_RANK', rank))
    if (rank == 0):
        checkpoint = model_zoo.load_url(url)
    if (world_size > 1):
        # Wait for rank 0 to finish downloading before other ranks read it.
        torch.distributed.barrier()
        if (rank > 0):
            checkpoint = model_zoo.load_url(url)
    return checkpoint
|
def get_torchvision_models():
    """Collect the ``model_urls`` dicts of every non-package submodule under
    ``torchvision.models`` into one flat name -> url mapping."""
    urls = {}
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if not ispkg:
            submodule = import_module('torchvision.models.{}'.format(name))
            # Missing model_urls attribute means no downloadable weights.
            urls.update(getattr(submodule, 'model_urls', {}))
    return urls
|
def _load_checkpoint(filename, map_location=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Either a filepath or URI
            (modelzoo:// | torchvision:// | open-mmlab:// | http(s)://).
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict | OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    if filename.startswith('modelzoo://'):
        warnings.warn('The URL scheme of "modelzoo://" is deprecated, please use "torchvision://" instead')
        model_urls = get_torchvision_models()
        return load_url_dist(model_urls[filename[len('modelzoo://'):]])
    if filename.startswith('torchvision://'):
        model_urls = get_torchvision_models()
        return load_url_dist(model_urls[filename[len('torchvision://'):]])
    if filename.startswith('open-mmlab://'):
        return load_url_dist(open_mmlab_model_urls[filename[len('open-mmlab://'):]])
    if filename.startswith(('http://', 'https://')):
        return load_url_dist(filename)
    # Plain filesystem path.
    if not osp.isfile(filename):
        raise IOError('{} is not a checkpoint file'.format(filename))
    return torch.load(filename, map_location=map_location)
|
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location)
    # extract the raw state dict from whatever the checkpoint format is
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and ('state_dict' in checkpoint):
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError('No state_dict found in checkpoint file {}'.format(filename))
    # Strip the "module." prefix added by (Distributed)DataParallel.
    # BUGFIX: iterate over ``state_dict`` itself; the old code indexed
    # checkpoint['state_dict'], which raised KeyError whenever the checkpoint
    # was a bare OrderedDict of weights.
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    # load into the wrapped module when the model itself is a wrapper
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint
|
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    # docstring fix: the result lives on CPU, not GPU
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    return state_dict_cpu
|
def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
    """
    if meta is None:
        meta = {}
    if not isinstance(meta, dict):
        raise TypeError('meta must be a dict or None, but got {}'.format(type(meta)))
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
    mmcv.mkdir_or_exist(osp.dirname(filename))
    # unwrap (Distributed)DataParallel before reading the state dict
    target = model.module if hasattr(model, 'module') else model
    data = {'meta': meta, 'state_dict': weights_to_cpu(target.state_dict())}
    if optimizer is not None:
        data['optimizer'] = optimizer.state_dict()
    torch.save(data, filename)
|
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize distributed training for the given launcher type."""
    if mp.get_start_method(allow_none=True) is None:
        # CUDA requires the 'spawn' start method for subprocesses
        mp.set_start_method('spawn')
    initializers = {
        'pytorch': _init_dist_pytorch,
        'mpi': _init_dist_mpi,
        'slurm': _init_dist_slurm,
    }
    if launcher not in initializers:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
    initializers[launcher](backend, **kwargs)
|
def _init_dist_pytorch(backend, **kwargs):
    # Rank is injected into the environment by the PyTorch launcher.
    rank = int(os.environ['RANK'])
    # TODO: use local_rank instead of rank % num_gpus
    num_gpus = torch.cuda.device_count()
    # Bind this process to one GPU on the node (round-robin by rank).
    torch.cuda.set_device((rank % num_gpus))
    dist.init_process_group(backend=backend, **kwargs)
|
def _init_dist_mpi(backend, **kwargs):
    # MPI launcher support is not implemented yet.
    raise NotImplementedError
|
def _init_dist_slurm(backend, port=29500, **kwargs):
    """Initialize distributed training from SLURM environment variables.

    Resolves the master address from the first host of the SLURM node list
    and exports MASTER_PORT/MASTER_ADDR/WORLD_SIZE/RANK so that
    ``dist.init_process_group`` can use the env:// rendezvous.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    # one GPU per task, round-robin over the node's devices
    torch.cuda.set_device(proc_id % torch.cuda.device_count())
    addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_PORT'] = str(port)
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
|
def get_dist_info():
    """Return ``(rank, world_size)``; ``(0, 1)`` outside distributed mode."""
    if torch.__version__ < '1.0':
        # torch < 1.0 has no dist.is_available/is_initialized API
        initialized = dist._initialized
    else:
        initialized = dist.is_available() and dist.is_initialized()
    if not initialized:
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
|
def master_only(func):
    """Decorator: run the wrapped function only on the rank-0 process.

    On non-master ranks the call is a no-op returning None.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)
    return wrapper
|
@HOOKS.register_module
class CheckpointHook(Hook):
    """Save checkpoints periodically at the end of training epochs.

    Args:
        interval (int): Save every ``interval`` epochs; non-positive disables.
        save_optimizer (bool): Whether to store the optimizer state as well.
        out_dir (str, optional): Directory to save to; defaults to
            ``runner.work_dir`` when unset.
        **kwargs: Extra arguments forwarded to ``runner.save_checkpoint``.
    """

    def __init__(self, interval=-1, save_optimizer=True, out_dir=None, **kwargs):
        self.interval = interval
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.args = kwargs

    @master_only
    def after_train_epoch(self, runner):
        if not self.every_n_epochs(runner, self.interval):
            return
        # fall back to the runner's work dir the first time we save
        if not self.out_dir:
            self.out_dir = runner.work_dir
        runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args)
|
@HOOKS.register_module
class ClosureHook(Hook):
    """Hook that plugs a user-provided callable into one hook stage.

    Args:
        fn_name (str): Name of the Hook method to override (e.g.
            ``'after_train_iter'``); must already exist on :class:`Hook`.
        fn (callable): Called as ``fn(runner)`` at that stage.
    """
    def __init__(self, fn_name, fn):
        # fn_name must be a valid hook stage; fn replaces it on this instance
        assert hasattr(self, fn_name)
        assert callable(fn)
        setattr(self, fn_name, fn)
|
class Hook(object):
    """Base class for runner hooks.

    Subclasses override any of the stage callbacks below; each receives the
    runner instance.  The ``every_n_*`` methods are convenience predicates
    for doing periodic work, and ``end_of_epoch`` tells whether the current
    inner iteration is the last one of the epoch.
    """

    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        # non-positive n disables the periodic trigger entirely
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        return runner.inner_iter + 1 == len(runner.data_loader)
|
@HOOKS.register_module
class IterTimerHook(Hook):
    """Record per-iteration data-loading time and total time in the log buffer."""

    def before_epoch(self, runner):
        self.t = time.time()

    def before_iter(self, runner):
        # time spent fetching the batch since the previous iteration ended
        runner.log_buffer.update({'data_time': time.time() - self.t})

    def after_iter(self, runner):
        # total wall time of this iteration (data loading + computation)
        runner.log_buffer.update({'time': time.time() - self.t})
        self.t = time.time()
|
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
    """
    # NOTE(review): '__metaclass__' is the Python 2 way of declaring a
    # metaclass and has no effect under Python 3, so 'log' is not enforced
    # as abstract at runtime there — confirm the intended Python version.
    __metaclass__ = ABCMeta
    def __init__(self, interval=10, ignore_last=True, reset_flag=False):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
    @abstractmethod
    def log(self, runner):
        # subclasses emit the actual log record here
        pass
    def before_run(self, runner):
        # mark only the last registered LoggerHook as the one that clears the
        # buffer, so every earlier logger hook still sees the averaged data
        for hook in runner.hooks[::(- 1)]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break
    def before_epoch(self, runner):
        runner.log_buffer.clear()
    def after_train_iter(self, runner):
        if self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif (self.end_of_epoch(runner) and (not self.ignore_last)):
            # average the leftover (< interval) iterations of this epoch
            runner.log_buffer.average(self.interval)
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()
    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()
    def after_val_epoch(self, runner):
        # validation logs once per epoch over all recorded values
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
|
def is_scalar(val, include_np=True, include_torch=True):
    """Tell the input variable is a scalar or not.

    Args:
        val: Input variable.
        include_np (bool): Whether include 0-d np.ndarray as a scalar.
        include_torch (bool): Whether include single-element torch.Tensor
            (including 0-d tensors) as a scalar.

    Returns:
        bool: True or False.
    """
    if isinstance(val, numbers.Number):
        return True
    elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
        return True
    elif include_torch and isinstance(val, torch.Tensor) and val.numel() == 1:
        # BUGFIX: the old check was ``len(val) == 1``, which raises TypeError
        # on 0-d tensors and wrongly accepted any tensor whose first dim is 1;
        # numel() == 1 treats exactly the single-element tensors as scalars.
        return True
    else:
        return False
|
@HOOKS.register_module
class PaviLoggerHook(LoggerHook):
    """Push scalar log-buffer entries (and optionally snapshots) to Pavi."""

    def __init__(self, init_kwargs=None, add_graph=False, add_last_ckpt=False, interval=10, ignore_last=True, reset_flag=True):
        super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt

    @master_only
    def before_run(self, runner):
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')
        # use the last component of the work dir as the task name
        self.run_name = runner.work_dir.split('/')[-1]
        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['task'] = self.run_name
        self.init_kwargs['model'] = runner._model_name
        self.writer = SummaryWriter(**self.init_kwargs)
        if self.add_graph:
            self.writer.add_graph(runner.model)

    @master_only
    def log(self, runner):
        # keep only scalar values, skipping the timing entries
        tags = {
            tag: val
            for tag, val in runner.log_buffer.output.items()
            if tag not in ('time', 'data_time') and is_scalar(val)
        }
        if tags:
            self.writer.add_scalars(runner.mode, tags, runner.iter)

    @master_only
    def after_run(self, runner):
        if self.add_last_ckpt:
            ckpt_path = osp.join(runner.work_dir, 'latest.pth')
            self.writer.add_snapshot_file(tag=self.run_name, snapshot_file_path=ckpt_path, iteration=runner.iter)
|
@HOOKS.register_module
class TensorboardLoggerHook(LoggerHook):
    """Write scalar and text entries from the log buffer to TensorBoard."""

    def __init__(self, log_dir=None, interval=10, ignore_last=True, reset_flag=True):
        super(TensorboardLoggerHook, self).__init__(interval, ignore_last, reset_flag)
        self.log_dir = log_dir

    @master_only
    def before_run(self, runner):
        # torch >= 1.1 ships its own tensorboard writer; older versions need
        # the external tensorboardX package
        use_torch_tb = (torch.__version__ >= '1.1') and ('.' in torch.__version__)
        if use_torch_tb:
            try:
                from torch.utils.tensorboard import SummaryWriter
            except ImportError:
                raise ImportError('Please run "pip install future tensorboard" to install the dependencies to use torch.utils.tensorboard (applicable to PyTorch 1.1 or higher)')
        else:
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise ImportError('Please install tensorboardX to use TensorboardLoggerHook.')
        if self.log_dir is None:
            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
        self.writer = SummaryWriter(self.log_dir)

    @master_only
    def log(self, runner):
        for var, record in runner.log_buffer.output.items():
            if var in ('time', 'data_time'):
                continue
            tag = '{}/{}'.format(var, runner.mode)
            if isinstance(record, str):
                self.writer.add_text(tag, record, runner.iter)
            else:
                self.writer.add_scalar(tag, record, runner.iter)

    @master_only
    def after_run(self, runner):
        self.writer.close()
|
@HOOKS.register_module
class TextLoggerHook(LoggerHook):
    """Log training status as formatted text and as JSON lines.

    Each log step writes a human-readable line through ``runner.logger`` and
    appends the same record to ``<work_dir>/<timestamp>.log.json``.
    """
    def __init__(self, interval=10, ignore_last=True, reset_flag=False):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
        # accumulated iteration time, used for the ETA estimate
        self.time_sec_tot = 0
    def before_run(self, runner):
        super(TextLoggerHook, self).before_run(runner)
        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir, '{}.log.json'.format(runner.timestamp))
        if (runner.meta is not None):
            # dump run metadata (env info, seed, ...) as the first json record
            self._dump_log(runner.meta, runner)
    def _get_max_memory(self, runner):
        # peak GPU memory of this process in MB (truncated to int);
        # in distributed mode rank 0 receives the max across all ranks
        mem = torch.cuda.max_memory_allocated()
        mem_mb = torch.tensor([(mem / (1024 * 1024))], dtype=torch.int, device=torch.device('cuda'))
        if (runner.world_size > 1):
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()
    def _log_info(self, log_dict, runner):
        if (runner.mode == 'train'):
            log_str = 'Epoch [{}][{}/{}]\tlr: {:.5f}, '.format(log_dict['epoch'], log_dict['iter'], len(runner.data_loader), log_dict['lr'])
            if ('time' in log_dict.keys()):
                # ETA from the running average iteration time since start_iter
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = (self.time_sec_tot / ((runner.iter - self.start_iter) + 1))
                eta_sec = (time_sec_avg * ((runner.max_iters - runner.iter) - 1))
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += 'eta: {}, '.format(eta_str)
                log_str += 'time: {:.3f}, data_time: {:.3f}, '.format(log_dict['time'], log_dict['data_time'])
                # NOTE(review): 'memory' is only put into log_dict when CUDA is
                # available (see log()), so this assumes a CUDA run — confirm
                # CPU-only training does not reach this branch
                log_str += 'memory: {}, '.format(log_dict['memory'])
        else:
            log_str = 'Epoch({}) [{}][{}]\t'.format(log_dict['mode'], (log_dict['epoch'] - 1), log_dict['iter'])
        log_items = []
        # NOTE(review): the skip list contains both 'Epoch' and 'epoch';
        # 'Epoch' looks like a leftover key name — harmless but worth checking
        for (name, val) in log_dict.items():
            if (name in ['mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', 'memory', 'epoch']):
                continue
            if isinstance(val, float):
                val = '{:.4f}'.format(val)
            log_items.append('{}: {}'.format(name, val))
        log_str += ', '.join(log_items)
        runner.logger.info(log_str)
    def _dump_log(self, log_dict, runner):
        # dump log as a json-lines record with floats rounded for compactness
        json_log = OrderedDict()
        for (k, v) in log_dict.items():
            json_log[k] = self._round_float(v)
        if (runner.rank == 0):
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')
    def _round_float(self, items):
        # recursively round floats (and floats inside lists) to 5 decimals
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items
    def log(self, runner):
        log_dict = OrderedDict()
        # 'time' is only present in the buffer during training (IterTimerHook)
        mode = ('train' if ('time' in runner.log_buffer.output) else 'val')
        log_dict['mode'] = mode
        log_dict['epoch'] = (runner.epoch + 1)
        log_dict['iter'] = (runner.inner_iter + 1)
        log_dict['lr'] = runner.current_lr()[0]
        if (mode == 'train'):
            log_dict['time'] = runner.log_buffer.output['time']
            log_dict['data_time'] = runner.log_buffer.output['data_time']
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)
        for (name, val) in runner.log_buffer.output.items():
            if (name in ['time', 'data_time']):
                continue
            log_dict[name] = val
        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
|
@HOOKS.register_module
class WandbLoggerHook(LoggerHook):
    """Stream numeric log-buffer entries to Weights & Biases."""

    def __init__(self, init_kwargs=None, interval=10, ignore_last=True, reset_flag=True):
        super(WandbLoggerHook, self).__init__(interval, ignore_last, reset_flag)
        self.import_wandb()
        self.init_kwargs = init_kwargs

    def import_wandb(self):
        try:
            import wandb
        except ImportError:
            raise ImportError('Please run "pip install wandb" to install wandb')
        self.wandb = wandb

    @master_only
    def before_run(self, runner):
        if self.wandb is None:
            self.import_wandb()
        if self.init_kwargs:
            self.wandb.init(**self.init_kwargs)
        else:
            self.wandb.init()

    @master_only
    def log(self, runner):
        # forward numeric entries only, tagged with the current mode
        metrics = {}
        for var, val in runner.log_buffer.output.items():
            if var in ('time', 'data_time'):
                continue
            if isinstance(val, numbers.Number):
                metrics['{}/{}'.format(var, runner.mode)] = val
        if metrics:
            self.wandb.log(metrics, step=runner.iter)

    @master_only
    def after_run(self, runner):
        self.wandb.join()
|
class LrUpdaterHook(Hook):
    """LR Scheduler in MMCV.

    Args:
        by_epoch (bool): LR changes epoch by epoch
        warmup (string): Type of warmup used. It can be None(use no warmup),
            'constant', 'linear' or 'exp'
        warmup_iters (int): The number of iterations or epochs that warmup
            lasts
        warmup_ratio (float): LR used at the beginning of warmup equals to
            warmup_ratio * initial_lr
        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
            means the number of epochs that warmup lasts, otherwise means the
            number of iteration that warmup lasts
    """
    def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.1, warmup_by_epoch=False, **kwargs):
        # validate the warmup configuration up front
        if (warmup is not None):
            if (warmup not in ['constant', 'linear', 'exp']):
                # NOTE(review): message omits 'exp', which is also accepted
                raise ValueError('"{}" is not a supported type for warming up, valid types are "constant" and "linear"'.format(warmup))
        if (warmup is not None):
            assert (warmup_iters > 0), '"warmup_iters" must be a positive integer'
            assert (0 < warmup_ratio <= 1.0), '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch
        if self.warmup_by_epoch:
            # warmup_iters is recomputed from epochs once the dataloader
            # length is known (see before_train_epoch)
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None
        self.base_lr = []      # initial lr for all param groups
        self.regular_lr = []   # expected lr if no warmup is used
    def _set_lr(self, runner, lr_groups):
        # write the computed lrs into the optimizer's param groups
        for (param_group, lr) in zip(runner.optimizer.param_groups, lr_groups):
            param_group['lr'] = lr
    def get_lr(self, runner, base_lr):
        # subclasses implement the actual schedule
        raise NotImplementedError
    def get_regular_lr(self, runner):
        return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
    def get_warmup_lr(self, cur_iters):
        # ramp from warmup_ratio * regular_lr up to regular_lr
        if (self.warmup == 'constant'):
            warmup_lr = [(_lr * self.warmup_ratio) for _lr in self.regular_lr]
        elif (self.warmup == 'linear'):
            k = ((1 - (cur_iters / self.warmup_iters)) * (1 - self.warmup_ratio))
            warmup_lr = [(_lr * (1 - k)) for _lr in self.regular_lr]
        elif (self.warmup == 'exp'):
            k = (self.warmup_ratio ** (1 - (cur_iters / self.warmup_iters)))
            warmup_lr = [(_lr * k) for _lr in self.regular_lr]
        return warmup_lr
    def before_run(self, runner):
        # record the initial lr of each param group so schedules are always
        # computed relative to it
        for group in runner.optimizer.param_groups:
            group.setdefault('initial_lr', group['lr'])
        self.base_lr = [group['initial_lr'] for group in runner.optimizer.param_groups]
    def before_train_epoch(self, runner):
        if (not self.by_epoch):
            return
        if self.warmup_by_epoch:
            epoch_len = len(runner.data_loader)
            self.warmup_iters = (self.warmup_epochs * epoch_len)
        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)
    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if (not self.by_epoch):
            # iteration-based schedule: recompute every iteration
            self.regular_lr = self.get_regular_lr(runner)
            if ((self.warmup is None) or (cur_iter >= self.warmup_iters)):
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        elif self.by_epoch:
            # epoch-based schedule: only warmup is applied per iteration
            if ((self.warmup is None) or (cur_iter > self.warmup_iters)):
                return
            elif (cur_iter == self.warmup_iters):
                # warmup just finished: restore the regular epoch lr
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
|
@HOOKS.register_module
class FixedLrUpdaterHook(LrUpdaterHook):
    """Schedule that keeps the learning rate constant at its base value."""

    def __init__(self, **kwargs):
        super(FixedLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # fixed schedule: the lr never changes
        return base_lr
|
@HOOKS.register_module
class StepLrUpdaterHook(LrUpdaterHook):
    """Decay the learning rate by ``gamma`` at fixed step(s).

    Args:
        step (int | list[int]): Decay period (int) or explicit decay
            milestones (list of positive ints).
        gamma (float): Multiplicative decay factor.
    """

    def __init__(self, step, gamma=0.1, **kwargs):
        assert isinstance(step, (list, int))
        if isinstance(step, list):
            for s in step:
                assert isinstance(s, int) and s > 0
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        super(StepLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter
        if isinstance(self.step, int):
            # periodic decay every self.step units of progress
            return base_lr * self.gamma ** (progress // self.step)
        # milestone list: count how many milestones have been passed
        exp = len(self.step)
        for i, milestone in enumerate(self.step):
            if progress < milestone:
                exp = i
                break
        return base_lr * self.gamma ** exp
|
@HOOKS.register_module
class ExpLrUpdaterHook(LrUpdaterHook):
    """Exponential decay: lr = base_lr * gamma ** progress."""

    def __init__(self, gamma, **kwargs):
        self.gamma = gamma
        super(ExpLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter
        return base_lr * self.gamma ** progress
|
@HOOKS.register_module
class PolyLrUpdaterHook(LrUpdaterHook):
    """Polynomial decay from base_lr down to min_lr.

    lr = (base_lr - min_lr) * (1 - progress/max_progress) ** power + min_lr
    """

    def __init__(self, power=1.0, min_lr=0.0, **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        coeff = (1 - progress / max_progress) ** self.power
        return (base_lr - self.min_lr) * coeff + self.min_lr
|
@HOOKS.register_module
class InvLrUpdaterHook(LrUpdaterHook):
    """Inverse decay: lr = base_lr * (1 + gamma * progress) ** -power."""

    def __init__(self, gamma, power=1.0, **kwargs):
        self.gamma = gamma
        self.power = power
        super(InvLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter
        return base_lr * (1 + self.gamma * progress) ** (-self.power)
|
@HOOKS.register_module
class CosineLrUpdaterHook(LrUpdaterHook):
    """Cosine annealing from base_lr down to target_lr over the whole run."""

    def __init__(self, target_lr=0, **kwargs):
        self.target_lr = target_lr
        super(CosineLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        # half-cosine shape: starts at base_lr, ends at target_lr
        scale = 0.5 * (1 + cos(pi * (progress / max_progress)))
        return self.target_lr + (base_lr - self.target_lr) * scale
|
@HOOKS.register_module
class EmptyCacheHook(Hook):
    """Release cached GPU memory at configurable points of the run."""

    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
        # flags selecting at which stage(s) the cache is emptied
        self._before_epoch = before_epoch
        self._after_epoch = after_epoch
        self._after_iter = after_iter

    def after_iter(self, runner):
        if self._after_iter:
            torch.cuda.empty_cache()

    def before_epoch(self, runner):
        if self._before_epoch:
            torch.cuda.empty_cache()

    def after_epoch(self, runner):
        if self._after_epoch:
            torch.cuda.empty_cache()
|
@HOOKS.register_module
class OptimizerHook(Hook):
    """Standard backward/step hook with optional gradient clipping.

    Args:
        grad_clip (dict, optional): kwargs for ``clip_grad_norm_``; None
            disables clipping.
    """

    def __init__(self, grad_clip=None):
        self.grad_clip = grad_clip

    def clip_grads(self, params):
        trainable = filter(lambda p: p.requires_grad, params)
        clip_grad.clip_grad_norm_(trainable, **self.grad_clip)

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        runner.optimizer.step()
|
@HOOKS.register_module
class DistSamplerSeedHook(Hook):
    """Set the sampler epoch so distributed shuffling differs per epoch."""
    def before_epoch(self, runner):
        # DistributedSampler uses the epoch number to seed its shuffle
        runner.data_loader.sampler.set_epoch(runner.epoch)
|
class LogBuffer(object):
    """Accumulate per-iteration scalars and compute (weighted) averages."""

    def __init__(self):
        self.val_history = OrderedDict()   # key -> list of recorded values
        self.n_history = OrderedDict()     # key -> list of sample counts
        self.output = OrderedDict()        # key -> latest averaged value
        self.ready = False                 # True once average() has run

    def clear(self):
        """Drop all recorded history and any computed output."""
        self.val_history.clear()
        self.n_history.clear()
        self.clear_output()

    def clear_output(self):
        """Drop only the averaged output, keeping the raw history."""
        self.output.clear()
        self.ready = False

    def update(self, vars, count=1):
        """Record one value per key, each weighted by ``count`` samples."""
        assert isinstance(vars, dict)
        for key, value in vars.items():
            self.val_history.setdefault(key, [])
            self.n_history.setdefault(key, [])
            self.val_history[key].append(value)
            self.n_history[key].append(count)

    def average(self, n=0):
        """Average latest n values or all values"""
        assert n >= 0
        for key, history in self.val_history.items():
            # n == 0 slices the whole history
            values = np.array(history[-n:])
            nums = np.array(self.n_history[key][-n:])
            self.output[key] = np.sum(values * nums) / np.sum(nums)
        self.ready = True
|
def worker_func(model_cls, model_kwargs, checkpoint, dataset, data_func, gpu_id, idx_queue, result_queue):
    """Inference worker loop for :func:`parallel_test`.

    Builds the model on one GPU, then serves dataset indices forever:
    pulls an index from ``idx_queue``, runs the model on that sample and
    pushes ``(idx, result)`` to ``result_queue``.  Terminated by the parent.
    """
    model = model_cls(**model_kwargs)
    load_checkpoint(model, checkpoint, map_location='cpu')
    torch.cuda.set_device(gpu_id)
    model.cuda()
    model.eval()
    with torch.no_grad():
        while True:
            idx = idx_queue.get()
            sample = dataset[idx]
            result = model(**data_func(sample, gpu_id))
            result_queue.put((idx, result))
|
def parallel_test(model_cls, model_kwargs, checkpoint, dataset, data_func, gpus, workers_per_gpu=1):
    """Parallel testing on multiple GPUs.

    Args:
        model_cls (type): Model class type.
        model_kwargs (dict): Arguments to init the model.
        checkpoint (str): Checkpoint filepath.
        dataset (:obj:`Dataset`): The dataset to be tested.
        data_func (callable): The function that generates model inputs.
        gpus (list[int]): GPU ids to be used.
        workers_per_gpu (int): Number of processes on each GPU. It is possible
            to run multiple workers on each GPU.

    Returns:
        list: Test results.
    """
    ctx = multiprocessing.get_context('spawn')
    idx_queue = ctx.Queue()
    result_queue = ctx.Queue()
    num_workers = len(gpus) * workers_per_gpu
    # spawn daemon workers, assigning GPUs round-robin
    workers = []
    for i in range(num_workers):
        gpu_id = gpus[i % len(gpus)]
        proc = ctx.Process(
            target=worker_func,
            args=(model_cls, model_kwargs, checkpoint, dataset, data_func, gpu_id, idx_queue, result_queue))
        proc.daemon = True
        proc.start()
        workers.append(proc)
    # feed every dataset index, then collect results in completion order
    for i in range(len(dataset)):
        idx_queue.put(i)
    results = [None] * len(dataset)
    prog_bar = mmcv.ProgressBar(task_num=len(dataset))
    for _ in range(len(dataset)):
        idx, res = result_queue.get()
        results[idx] = res
        prog_bar.update()
    print('\n')
    for proc in workers:
        proc.terminate()
    return results
|
class Priority(Enum):
    """Hook priority levels.

    Lower value means higher priority:
    HIGHEST=0, VERY_HIGH=10, HIGH=30, NORMAL=50, LOW=70, VERY_LOW=90,
    LOWEST=100.
    """
    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    NORMAL = 50
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100


def get_priority(priority):
    """Normalize a priority given as an int, a name string or a member.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    if isinstance(priority, Priority):
        return priority.value
    if isinstance(priority, str):
        # name lookup is case-insensitive
        return Priority[priority.upper()].value
    raise TypeError('priority must be an integer or Priority enum value')
|
class Runner(object):
'A training helper for PyTorch.\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n batch_processor (callable): A callable method that process a data\n batch. The interface of this method should be\n `batch_processor(model, data, train_mode) -> dict`\n optimizer (dict or :obj:`torch.optim.Optimizer`): If it is a dict,\n runner will construct an optimizer according to it.\n work_dir (str, optional): The working directory to save checkpoints\n and logs.\n log_level (int): Logging level.\n logger (:obj:`logging.Logger`): Custom logger. If `None`, use the\n default logger.\n meta (dict | None): A dict records some import information such as\n environment info and seed, which will be logged in logger hook.\n '
def __init__(self, model, batch_processor, optimizer=None, work_dir=None, log_level=logging.INFO, logger=None, meta=None):
assert callable(batch_processor)
self.model = model
if (optimizer is not None):
self.optimizer = self.init_optimizer(optimizer)
else:
self.optimizer = None
self.batch_processor = batch_processor
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif (work_dir is None):
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
(self._rank, self._world_size) = get_dist_info()
self.timestamp = get_time_str()
if (logger is None):
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
if (meta is not None):
assert isinstance(meta, dict), '"meta" must be a dict or None'
self.meta = meta
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
'str: Name of the model, usually the module class name.'
return self._model_name
@property
def rank(self):
'int: Rank of current process. (distributed training)'
return self._rank
@property
def world_size(self):
'int: Number of processes participating in the job.\n (distributed training)'
return self._world_size
@property
def hooks(self):
'list[:obj:`Hook`]: A list of registered hooks.'
return self._hooks
@property
def epoch(self):
'int: Current epoch.'
return self._epoch
@property
def iter(self):
'int: Current iteration.'
return self._iter
@property
def inner_iter(self):
'int: Iteration in an epoch.'
return self._inner_iter
@property
def max_epochs(self):
'int: Maximum training epochs.'
return self._max_epochs
@property
def max_iters(self):
'int: Maximum training iterations.'
return self._max_iters
def init_optimizer(self, optimizer):
"Init the optimizer.\n\n Args:\n optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an\n optimizer object or a dict used for constructing the optimizer.\n\n Returns:\n :obj:`~torch.optim.Optimizer`: An optimizer object.\n\n Examples:\n >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)\n >>> type(runner.init_optimizer(optimizer))\n <class 'torch.optim.sgd.SGD'>\n "
if isinstance(optimizer, dict):
optimizer = obj_from_dict(optimizer, torch.optim, dict(params=self.model.parameters()))
elif (not isinstance(optimizer, torch.optim.Optimizer)):
raise TypeError('optimizer must be either an Optimizer object or a dict, but got {}'.format(type(optimizer)))
return optimizer
def _add_file_handler(self, logger, filename=None, mode='w', level=logging.INFO):
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
'Init the logger.\n\n Args:\n log_dir(str, optional): Log file directory. If not specified, no\n log file will be used.\n level (int or str): See the built-in python logging module.\n\n Returns:\n :obj:`~logging.Logger`: Python logger.\n '
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level)
logger = logging.getLogger(__name__)
if (log_dir and (self.rank == 0)):
filename = '{}.log'.format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
'Get current learning rates.\n\n Returns:\n list: Current learning rate of all param groups.\n '
if (self.optimizer is None):
raise RuntimeError('lr is not applicable because optimizer does not exist.')
return [group['lr'] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority='NORMAL'):
'Register a hook into the hook list.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n '
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
inserted = False
for i in range((len(self._hooks) - 1), (- 1), (- 1)):
if (priority >= self._hooks[i].priority):
self._hooks.insert((i + 1), hook)
inserted = True
break
if (not inserted):
self._hooks.insert(0, hook)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, filename, map_location='cpu', strict=False):
self.logger.info('load checkpoint from %s', filename)
return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
if (meta is None):
meta = dict(epoch=(self.epoch + 1), iter=self.iter)
else:
meta.update(epoch=(self.epoch + 1), iter=self.iter)
filename = filename_tmpl.format((self.epoch + 1))
filepath = osp.join(out_dir, filename)
optimizer = (self.optimizer if save_optimizer else None)
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
if create_symlink:
mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))
def train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = (self._max_epochs * len(data_loader))
self.call_hook('before_train_epoch')
for (i, data_batch) in enumerate(data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
outputs = self.batch_processor(self.model, data_batch, train_mode=True, **kwargs)
if (not isinstance(outputs, dict)):
raise TypeError('batch_processor() must return a dict')
if ('log_vars' in outputs):
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
self.call_hook('before_val_epoch')
for (i, data_batch) in enumerate(data_loader):
self._inner_iter = i
self.call_hook('before_val_iter')
with torch.no_grad():
outputs = self.batch_processor(self.model, data_batch, train_mode=False, **kwargs)
if (not isinstance(outputs, dict)):
raise TypeError('batch_processor() must return a dict')
if ('log_vars' in outputs):
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_val_iter')
self.call_hook('after_val_epoch')
def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
    """Restore epoch/iter counters (and optionally optimizer) from a checkpoint.

    With ``map_location='default'`` tensors are mapped onto the current CUDA
    device; otherwise ``map_location`` is forwarded to ``torch.load``.
    """
    if map_location == 'default':
        dev = torch.cuda.current_device()
        checkpoint = self.load_checkpoint(
            checkpoint, map_location=lambda storage, loc: storage.cuda(dev))
    else:
        checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
    self._epoch = checkpoint['meta']['epoch']
    self._iter = checkpoint['meta']['iter']
    if resume_optimizer and 'optimizer' in checkpoint:
        self.optimizer.load_state_dict(checkpoint['optimizer'])
    self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
    """Start running.

    Args:
        data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
            and validation.
        workflow (list[tuple]): A list of (phase, epochs) to specify the
            running order and epochs. E.g, [('train', 2), ('val', 1)] means
            running 2 epochs for training and 1 epoch for validation,
            iteratively.
        max_epochs (int): Total training epochs.
    """
    assert isinstance(data_loaders, list)
    assert mmcv.is_list_of(workflow, tuple)
    assert len(data_loaders) == len(workflow)
    self._max_epochs = max_epochs
    work_dir = self.work_dir if self.work_dir is not None else 'NONE'
    self.logger.info('Start running, host: %s, work_dir: %s',
                     get_host_info(), work_dir)
    self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
    self.call_hook('before_run')
    while self.epoch < max_epochs:
        for loader, (mode, num_epochs) in zip(data_loaders, workflow):
            # a phase is either the name of a runner method ('train'/'val')
            # or a user-supplied callable with the same signature
            if isinstance(mode, str):
                if not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run an epoch'.format(mode))
                epoch_runner = getattr(self, mode)
            elif callable(mode):
                epoch_runner = mode
            else:
                raise TypeError(
                    'mode in workflow must be a str or callable function, not {}'.format(type(mode)))
            for _ in range(num_epochs):
                if mode == 'train' and self.epoch >= max_epochs:
                    return
                epoch_runner(loader, **kwargs)
    # give async hooks (e.g. loggers) a moment to flush before after_run
    time.sleep(1)
    self.call_hook('after_run')
def register_lr_hook(self, lr_config):
    """Register an LR scheduler hook from a config dict or a hook object."""
    if isinstance(lr_config, dict):
        assert 'policy' in lr_config
        # e.g. policy='step' -> type='StepLrUpdaterHook'
        policy = lr_config.pop('policy')
        lr_config['type'] = policy.title() + 'LrUpdaterHook'
        hook = mmcv.build_from_cfg(lr_config, HOOKS)
    else:
        hook = lr_config
    self.register_hook(hook)
def register_optimizer_hook(self, optimizer_config):
    """Register an optimizer hook; a ``None`` config is a no-op."""
    if optimizer_config is None:
        return
    if not isinstance(optimizer_config, dict):
        hook = optimizer_config
    else:
        optimizer_config.setdefault('type', 'OptimizerHook')
        hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
    self.register_hook(hook)
def register_checkpoint_hook(self, checkpoint_config):
    """Register a checkpoint-saving hook; a ``None`` config is a no-op."""
    if checkpoint_config is None:
        return
    if not isinstance(checkpoint_config, dict):
        hook = checkpoint_config
    else:
        checkpoint_config.setdefault('type', 'CheckpointHook')
        hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
    self.register_hook(hook)
def register_logger_hooks(self, log_config):
    """Register logger hooks with VERY_LOW priority.

    Args:
        log_config (dict | None): Must contain 'interval' and a 'hooks' list
            of hook configs; ``None`` disables logger registration.
    """
    # BUGFIX: register_training_hooks() defaults log_config to None and calls
    # this unconditionally; without this guard that crashed with a TypeError.
    # Mirrors the None handling of register_optimizer_hook() et al.
    if log_config is None:
        return
    log_interval = log_config['interval']
    for info in log_config['hooks']:
        logger_hook = mmcv.build_from_cfg(
            info, HOOKS, default_args=dict(interval=log_interval))
        self.register_hook(logger_hook, priority='VERY_LOW')
def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None):
    """Register default hooks for training.

    Default hooks include:

    - LrUpdaterHook
    - OptimizerStepperHook
    - CheckpointSaverHook
    - IterTimerHook
    - LoggerHook(s)

    Args:
        lr_config (dict | Hook): LR updater hook config (must contain
            'policy') or a ready hook object; always registered.
        optimizer_config (dict | Hook | None): Optimizer hook config;
            skipped when None.
        checkpoint_config (dict | Hook | None): Checkpoint hook config;
            skipped when None.
        log_config (dict | None): Logger hooks config with 'interval' and
            'hooks' entries.
    """
    self.register_lr_hook(lr_config)
    self.register_optimizer_hook(optimizer_config)
    self.register_checkpoint_hook(checkpoint_config)
    # timer must be in place so logger hooks can report iteration speed
    self.register_hook(IterTimerHook())
    self.register_logger_hooks(log_config)
|
def get_host_info():
    """Return the current ``user@hostname`` identification string."""
    return '@'.join([getuser(), gethostname()])
|
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    now = time.localtime()
    return time.strftime('%Y%m%d_%H%M%S', now)
|
def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from dict.

    The dict must contain the key "type", which indicates the object type, it
    can be either a string or type, such as "list" or ``list``. Remaining
    fields are treated as the arguments for constructing the object.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain expected object
            classes.
        default_args (dict, optional): Default arguments for initializing the
            object.

    Returns:
        any type: Object built from the dict.

    Raises:
        TypeError: If ``info['type']`` is neither a str nor a type.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    args = info.copy()
    obj_type = args.pop('type')
    # plain isinstance instead of the deprecated mmcv.is_str wrapper
    if isinstance(obj_type, str):
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            # no parent module given: resolve the name among loaded modules
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(
            type(obj_type)))
    if default_args is not None:
        # defaults never override explicitly provided arguments
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_type(**args)
|
class ConfigDict(Dict):
    """An addict ``Dict`` that raises on missing keys instead of creating them,
    and surfaces missing-key attribute access as ``AttributeError``."""

    def __missing__(self, name):
        # addict silently auto-creates missing entries; a config must not.
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            # translate the KeyError from __missing__ into the attribute
            # protocol's expected exception type
            err = AttributeError("'{}' object has no attribute '{}'".format(
                self.__class__.__name__, name))
        except Exception as e:
            err = e
        else:
            return value
        raise err
|
def add_args(parser, cfg, prefix=''):
    """Recursively add config entries to an argparse parser.

    Nested dicts become dotted option names (e.g. ``--model.depth``).

    Args:
        parser (ArgumentParser): Parser extended in place.
        cfg (dict): Config mapping of option names to default values.
        prefix (str): Dotted prefix accumulated during recursion.

    Returns:
        ArgumentParser: The same parser, for chaining.
    """
    for k, v in cfg.items():
        arg = '--' + prefix + k
        # BUGFIX: bool must be tested before int — bool is a subclass of int,
        # so the int branch used to swallow booleans and the store_true
        # branch was unreachable.
        if isinstance(v, bool):
            parser.add_argument(arg, action='store_true')
        elif isinstance(v, str):
            parser.add_argument(arg)
        elif isinstance(v, int):
            parser.add_argument(arg, type=int)
        elif isinstance(v, float):
            parser.add_argument(arg, type=float)
        elif isinstance(v, dict):
            add_args(parser, v, prefix + k + '.')
        elif isinstance(v, abc.Iterable):
            # NOTE(review): assumes a non-empty, indexable sequence; v[0]
            # would fail on sets/generators — confirm callers pass lists.
            parser.add_argument(arg, type=type(v[0]), nargs='+')
        else:
            print('cannot parse key {} of type {}'.format(prefix + k, type(v)))
    return parser
|
class Config(object):
    """A facility for config and config files.

    It supports common file formats as configs: python/json/yaml. The
    interface is the same as a dict object and also allows access to config
    values as attributes.

    Example:
        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
        >>> cfg.a
        1
        >>> cfg.b
        {'b1': [0, 1]}
        >>> cfg.b.b1
        [0, 1]
        >>> cfg = Config.fromfile('tests/data/config/a.py')
        >>> cfg.filename
        "/home/kchen/projects/mmcv/tests/data/config/a.py"
        >>> cfg.item4
        'test'
        >>> cfg
        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
    """

    @staticmethod
    def _file2dict(filename):
        """Load ``filename`` and return ``(cfg_dict, cfg_text)``.

        Python configs are executed as a throwaway module; any ``_base_``
        entry is loaded recursively and merged beneath the child config.
        """
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        if filename.endswith('.py'):
            with tempfile.TemporaryDirectory() as temp_config_dir:
                shutil.copyfile(filename,
                                osp.join(temp_config_dir, '_tempconfig.py'))
                sys.path.insert(0, temp_config_dir)
                mod = import_module('_tempconfig')
                sys.path.pop(0)
                cfg_dict = {
                    name: value
                    for name, value in mod.__dict__.items()
                    if not name.startswith('__')
                }
                # drop the cached module so the next load re-executes the file
                del sys.modules['_tempconfig']
        elif filename.endswith(('.yml', '.yaml', '.json')):
            import mmcv
            cfg_dict = mmcv.load(filename)
        else:
            raise IOError('Only py/yml/yaml/json type are supported now!')
        cfg_text = filename + '\n'
        with open(filename, 'r') as f:
            cfg_text += f.read()
        if '_base_' in cfg_dict:
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop('_base_')
            base_filename = base_filename if isinstance(
                base_filename, list) else [base_filename]
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                if len(base_cfg_dict.keys() & c.keys()) > 0:
                    raise KeyError('Duplicate key is not allowed among bases')
                base_cfg_dict.update(c)
            # the child config wins over values inherited from its bases
            Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return cfg_dict, cfg_text

    @staticmethod
    def _merge_a_into_b(a, b):
        """Recursively merge dict ``a`` into ``b`` in place (``a`` wins).

        A nested dict in ``a`` carrying a truthy ``DELETE_KEY`` replaces the
        corresponding subtree in ``b`` instead of merging into it.
        """
        for k, v in a.items():
            if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
                if not isinstance(b[k], dict):
                    raise TypeError(
                        'Cannot inherit key {} from base!'.format(k))
                Config._merge_a_into_b(v, b[k])
            else:
                b[k] = v

    @staticmethod
    def fromfile(filename):
        """Build a :class:`Config` from a py/json/yaml file."""
        cfg_dict, cfg_text = Config._file2dict(filename)
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    @staticmethod
    def auto_argparser(description=None):
        """Generate argparser from config file automatically (experimental)"""
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return parser, cfg

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if cfg_dict is None:
            cfg_dict = dict()
        elif not isinstance(cfg_dict, dict):
            raise TypeError('cfg_dict must be a dict, but got {}'.format(
                type(cfg_dict)))
        # bypass our own __setattr__, which would write into _cfg_dict
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    @property
    def filename(self):
        return self._filename

    @property
    def text(self):
        return self._text

    def __repr__(self):
        return 'Config (path: {}): {}'.format(self.filename,
                                              self._cfg_dict.__repr__())

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def dump(self):
        """Serialize the config dict to a JSON string."""
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        format_text = json.dumps(cfg_dict, indent=2)
        return format_text

    def merge_from_dict(self, options):
        """Merge a flat dotted-key dict into this config.

        Merge the dict parsed by MultipleKVAction into this cfg.
        Example,
        >>> options = {'model.backbone.depth': 50}
        >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
        >>> cfg.merge_from_dict(options)

        Args:
            options (dict): dict of configs to merge from; dotted keys
                address nested fields.
        """
        option_cfg_dict = {}
        for full_key, v in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:-1]:
                # BUGFIX: setdefault instead of unconditional assignment so
                # options sharing a prefix (e.g. 'a.b' and 'a.c') do not
                # clobber each other's already-built subtree.
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            d[key_list[-1]] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        Config._merge_a_into_b(option_cfg_dict, cfg_dict)
|
def get_logger(name, log_file=None, log_level=logging.INFO):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers, otherwise the initialized logger
    will be directly returned. During initialization, a StreamHandler will
    always be added. If ``log_file`` is specified and the process rank is 0,
    a FileHandler will also be added.

    Args:
        name (str): Logger name.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the logger.
        log_level (int): The logger level. Note that only the process of
            rank 0 is affected, and other processes will set the level to
            "Error" thus be silent most of the time.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger
    # a child of an already-initialized logger inherits its handlers
    for initialized_name in logger_initialized:
        if name.startswith(initialized_name):
            return logger
    if dist.is_available() and dist.is_initialized():
        rank = dist.get_rank()
    else:
        rank = 0
    handlers = [logging.StreamHandler()]
    if log_file is not None and rank == 0:
        # only rank 0 writes a log file
        handlers.append(logging.FileHandler(log_file, 'w'))
    fmt = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    for handler in handlers:
        handler.setFormatter(fmt)
        handler.setLevel(log_level)
        logger.addHandler(handler)
    # non-zero ranks are kept quiet except for errors
    logger.setLevel(log_level if rank == 0 else logging.ERROR)
    logger_initialized[name] = True
    return logger
|
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): The logger to be used.
            Some special loggers are:
            - "silent": no message will be printed.
            - other str: the logger obtained with `get_root_logger(logger)`.
            - None: The `print()` method will be used to print log messages.
        level (int): Logging level. Only available when `logger` is a Logger
            object or "root".
    """
    # branch order matters: 'silent' must be recognized before the generic
    # str case, which resolves a named logger
    if logger is None:
        print(msg)
    elif isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger == 'silent':
        pass
    elif isinstance(logger, str):
        get_logger(logger).log(level, msg)
    else:
        raise TypeError('logger should be either a logging.Logger object, str, "silent" or None, but got {}'.format(type(logger)))
|
def is_str(x):
    """Whether the input is a string instance.

    Note: This method is deprecated since python 2 is no longer supported.
    """
    return isinstance(x, str)
|
def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator.

    Returns:
        iterator or specified type: The converted object.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')
    casted = map(dst_type, inputs)
    # materialize only when a concrete container type was requested
    return casted if return_type is None else return_type(casted)
|
def list_cast(inputs, dst_type):
    """Cast elements of an iterable into a list of ``dst_type``.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, list)
|
def tuple_cast(inputs, dst_type):
    """Cast elements of an iterable into a tuple of ``dst_type``.

    A partial method of :func:`iter_cast`.
    """
    return iter_cast(inputs, dst_type, tuple)
|
def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type; defaults to any
            :class:`collections.abc.Sequence`.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        expected_seq = abc.Sequence
    else:
        assert isinstance(seq_type, type)
        expected_seq = seq_type
    if not isinstance(seq, expected_seq):
        return False
    # every item must match; an empty sequence vacuously passes
    return all(isinstance(item, expected_type) for item in seq)
|
def is_list_of(seq, expected_type):
    """Check whether ``seq`` is a list whose items are all ``expected_type``.

    A partial method of :func:`is_seq_of`.
    """
    return is_seq_of(seq, expected_type, list)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.