def create_pipeline_configuration(DEBUG=False, batch_size=128):
    config = {
        'batch_dim': 0,
        'depth': 10000,
        'basic_blocks': (BatchNorm2d, AvgPool2d, Linear, Conv2d, ReLU),
        'model_inputs': {
            'input0': {'shape': torch.Size([128, 3, 32, 32]), 'dtype': torch.float32,
                       'is_batched': True, 'used_by': [0]}},
        'model_outputs': {
            'WideResNet/Linear[fc]': {'shape': torch.Size([128, 10]), 'dtype': torch.float32,
                                      'is_batched': True, 'created_by': 1}},
        'stages': {
            0: {'stage_cls': Partition0,
                'inputs': {
                    'input0': {'shape': torch.Size([128, 3, 32, 32]), 'dtype': torch.float32,
                               'req_grad': False, 'is_batched': True, 'created_by': -1}},
                'outputs': {
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/torch::add_16':
                        {'shape': torch.Size([128, 64, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [1]}},
                'devices': ['cpu' if DEBUG else 'cuda:0'],
                'stage_depth': 1},
            1: {'stage_cls': Partition1,
                'inputs': {
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/torch::add_16':
                        {'shape': torch.Size([128, 64, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 0}},
                'outputs': {
                    'WideResNet/Linear[fc]': {'shape': torch.Size([128, 10]), 'dtype': torch.float32,
                                              'req_grad': True, 'is_batched': True, 'used_by': [-1]}},
                'devices': ['cpu' if DEBUG else 'cuda:1'],
                'stage_depth': 0}}}

    # Overwrite the traced batch dimension with the requested batch size.
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])
    return config
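# --- Usage sketch (editor's addition, not part of the generated file). The
# only dynamic part of create_pipeline_configuration is the loop that splices
# the requested batch size into every traced shape. The same rewrite in
# isolation; 'rebatch' is our illustrative name, not part of the generated code:
def rebatch(shape, batch_size, batch_dim=0):
    # Replace the size recorded at trace time along batch_dim.
    return torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])
# rebatch(torch.Size([128, 3, 32, 32]), 32) -> torch.Size([32, 3, 32, 32])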
class Partition0(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        super().__init__()
        # Register the traced layers under short internal names l_0 .. l_13.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing parameters/buffers (none for this stage).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1]
        # Maps internal names back to the original model's attribute paths.
        self.lookup = {'l_0': 'conv1',
                       'l_1': 'block1.layer.0.bn1', 'l_2': 'block1.layer.0.relu1',
                       'l_3': 'block1.layer.0.conv1', 'l_4': 'block1.layer.0.bn2',
                       'l_5': 'block1.layer.0.relu2', 'l_6': 'block1.layer.0.conv2',
                       'l_7': 'block1.layer.0.convShortcut',
                       'l_8': 'block1.layer.1.bn1', 'l_9': 'block1.layer.1.relu1',
                       'l_10': 'block1.layer.1.conv1', 'l_11': 'block1.layer.1.bn2',
                       'l_12': 'block1.layer.1.relu2', 'l_13': 'block1.layer.1.conv2'}
        self.to(self.device)

    def forward(self, *args):
        x0 = unflatten(args, self.input_structure)[0]
        t_0 = self.l_0(x0)
        # block1, BasicBlock[0] (with convolutional shortcut)
        t_0 = self.l_1(t_0)
        t_0 = self.l_2(t_0)
        t_1 = self.l_3(t_0)
        t_0 = self.l_7(t_0)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = torch.add(t_0, t_1)
        # block1, BasicBlock[1] (identity shortcut)
        t_0 = self.l_8(t_1)
        t_0 = self.l_9(t_0)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = torch.add(t_1, t_0)
        return (t_0,)

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, state): return load_state_dict(self, state)
    def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse)
    def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/BatchNorm2d[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]',
        'WideResNet/BatchNorm2d[bn1]',
        'WideResNet/ReLU[relu]',
        'WideResNet/AvgPool2d[avg_pool]',
        'WideResNet/Linear[fc]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1]
        self.lookup = {'l_0': 'block2.layer.0.bn1', 'l_1': 'block2.layer.0.relu1',
                       'l_2': 'block2.layer.0.conv1', 'l_3': 'block2.layer.0.bn2',
                       'l_4': 'block2.layer.0.relu2', 'l_5': 'block2.layer.0.conv2',
                       'l_6': 'block2.layer.0.convShortcut',
                       'l_7': 'block2.layer.1.bn1', 'l_8': 'block2.layer.1.relu1',
                       'l_9': 'block2.layer.1.conv1', 'l_10': 'block2.layer.1.bn2',
                       'l_11': 'block2.layer.1.relu2', 'l_12': 'block2.layer.1.conv2',
                       'l_13': 'block3.layer.0.bn1', 'l_14': 'block3.layer.0.relu1',
                       'l_15': 'block3.layer.0.conv1', 'l_16': 'block3.layer.0.bn2',
                       'l_17': 'block3.layer.0.relu2', 'l_18': 'block3.layer.0.conv2',
                       'l_19': 'block3.layer.0.convShortcut',
                       'l_20': 'block3.layer.1.bn1', 'l_21': 'block3.layer.1.relu1',
                       'l_22': 'block3.layer.1.conv1', 'l_23': 'block3.layer.1.bn2',
                       'l_24': 'block3.layer.1.relu2', 'l_25': 'block3.layer.1.conv2',
                       'l_26': 'bn1', 'l_27': 'relu', 'l_28': 'avg_pool', 'l_29': 'fc'}
        self.to(self.device)

    def forward(self, *args):
        x0 = unflatten(args, self.input_structure)[0]
        # block2, BasicBlock[0] (with convolutional shortcut)
        t_0 = self.l_0(x0)
        t_0 = self.l_1(t_0)
        t_1 = self.l_2(t_0)
        t_0 = self.l_6(t_0)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = torch.add(t_0, t_1)
        # block2, BasicBlock[1]
        t_0 = self.l_7(t_1)
        t_0 = self.l_8(t_0)
        t_0 = self.l_9(t_0)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = torch.add(t_1, t_0)
        # block3, BasicBlock[0] (with convolutional shortcut)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        t_1 = self.l_15(t_0)
        t_0 = self.l_19(t_0)
        t_1 = self.l_16(t_1)
        t_1 = self.l_17(t_1)
        t_1 = self.l_18(t_1)
        t_1 = torch.add(t_0, t_1)
        # block3, BasicBlock[1]
        t_0 = self.l_20(t_1)
        t_0 = self.l_21(t_0)
        t_0 = self.l_22(t_0)
        t_0 = self.l_23(t_0)
        t_0 = self.l_24(t_0)
        t_0 = self.l_25(t_0)
        t_0 = torch.add(t_1, t_0)
        # head
        t_0 = self.l_26(t_0)
        t_0 = self.l_27(t_0)
        t_0 = self.l_28(t_0)
        t_0 = t_0.view(-1, 256)
        t_0 = self.l_29(t_0)
        return (t_0,)

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, state): return load_state_dict(self, state)
    def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse)
    def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
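# --- Usage sketch (editor's addition): wiring the generated partitions to a
# model instance via the layerDict/tensorDict helpers defined below. Assumes a
# WideResNet implementation whose module tree matches the traced scopes above;
# the constructor signature is hypothetical.
def _demo_run_partitions():
    config = create_pipeline_configuration(DEBUG=True, batch_size=8)
    model = WideResNet(depth=16, num_classes=10, widen_factor=4)  # assumed
    layers = layerDict(model, depth=config['depth'], basic_blocks=config['basic_blocks'])
    tensors = tensorDict(model)
    p0 = Partition0(layers, tensors, device='cpu')
    p1 = Partition1(layers, tensors, device='cpu')
    x = torch.randn(8, 3, 32, 32)
    (act,) = p0(x)       # [8, 64, 32, 32], the stage 0 -> stage 1 activation
    (logits,) = p1(act)  # [8, 10]
    return logits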
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str] = None,
                   basic_blocks: Tuple[Type[nn.Module]] = (), full: bool = False
                   ) -> Iterator[Tuple[nn.Module, str, nn.Module, Optional[bool]]]:
    """Iterate over model layers yielding (layer, layer_scope, encasing_module).

    Parameters:
    -----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go
    basic_blocks:
        a list of modules that, if encountered, will not be broken down
    full:
        whether to yield only layers specified by the depth and basic_blocks
        options, or to yield all layers (with a terminal flag appended)
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, sub_module in module.named_children():
        scope = prefix + '/' + type(sub_module).__name__ + f'[{name}]'
        # A layer is terminal if it has no children, is a basic block,
        # or the depth budget is exhausted.
        if (len(list(sub_module.children())) == 0
                or isinstance(sub_module, tuple(basic_blocks))
                or depth == 0):
            if full:
                yield sub_module, scope, module, True
            else:
                yield sub_module, scope, module
        else:
            if full:
                yield sub_module, scope, module, False
            yield from traverse_model(sub_module, depth - 1, scope, basic_blocks, full)
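# --- Usage sketch (editor's addition): the scope naming scheme that the
# generated LAYER_SCOPES lists rely on, demonstrated on a toy model.
def _demo_traverse_model():
    net = nn.Sequential(nn.Conv2d(3, 8, 3),
                        nn.Sequential(nn.ReLU(), nn.Linear(8, 2)))
    for layer, scope, parent in traverse_model(net, depth=1000):
        print(scope)
    # Sequential/Conv2d[0]
    # Sequential/Sequential[1]/ReLU[0]
    # Sequential/Sequential[1]/Linear[1]
    # With basic_blocks=(nn.Sequential,), the inner Sequential is kept whole and
    # only 'Sequential/Conv2d[0]' and 'Sequential/Sequential[1]' are yielded.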
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[str, nn.Module]:
    return {s: l for l, s, _ in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str] = None
                          ) -> Iterator[Tuple[torch.Tensor, str]]:
    """Iterate over a model's buffers and parameters, yielding (obj, obj_scope).

    Parameters:
    -----------
    module:
        the model to iterate over
    """
    if prefix is None:
        prefix = type(module).__name__
    # Direct parameters and buffers of this module.
    for param_name, param in module.named_parameters(recurse=False):
        param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
        yield param, param_scope
    for buffer_name, buffer in module.named_buffers(recurse=False):
        buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
        yield buffer, buffer_scope
    # Recurse into children.
    for name, sub_module in module.named_children():
        yield from traverse_params_buffs(sub_module,
                                         prefix + '/' + type(sub_module).__name__ + f'[{name}]')
def tensorDict(model: nn.Module) -> OrderedDict[str, Tensor]:
    return collections.OrderedDict((s, t) for t, s in traverse_params_buffs(model))
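# --- Usage sketch (editor's addition): layerDict/tensorDict invert the
# traversals into scope-keyed dictionaries, the exact form the Partition
# constructors above expect.
def _demo_layer_and_tensor_dicts():
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    print(list(layerDict(net).keys()))
    # ['Sequential/Conv2d[0]', 'Sequential/BatchNorm2d[1]']
    print(list(tensorDict(net).keys())[:3])
    # ['Sequential/Conv2d[0]/Parameter[weight]',
    #  'Sequential/Conv2d[0]/Parameter[bias]',
    #  'Sequential/BatchNorm2d[1]/Parameter[weight]']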
def move_tensors(ts, device):
    def move(t):
        if isinstance(t, (nn.Module, Tensor)):
            return t.to(device)
        return t
    return nested_map(move, ts)
def nested_map(func, ts, full=False):
    if isinstance(ts, torch.Size):
        # torch.Size is a tuple, but we treat it as a single leaf.
        return func(ts)
    elif isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, t, full=full) for t in ts)
    elif isinstance(ts, dict):
        return {k: nested_map(func, v, full=full) for k, v in ts.items()}
    elif isinstance(ts, slice) and full:
        start = nested_map(func, ts.start, full=full)
        stop = nested_map(func, ts.stop, full=full)
        step = nested_map(func, ts.step, full=full)
        return slice(start, stop, step)
    return func(ts)
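# --- Usage sketch (editor's addition): nested_map/move_tensors work on
# arbitrarily nested containers and leave non-tensor leaves alone.
def _demo_nested_map():
    batch = {'x': torch.randn(2, 3), 'meta': [torch.tensor([1, 2]), 'label']}
    on_cpu = move_tensors(batch, 'cpu')  # tensors moved, 'label' untouched
    doubled = nested_map(lambda t: t * 2 if torch.is_tensor(t) else t, batch)
    return on_cpu, doubled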
def flatten(ts):
    if isinstance(ts, torch.Size):
        # torch.Size is a tuple, but we treat it as a single leaf.
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        yield from chain(*[flatten(t) for t in ts])
    elif isinstance(ts, dict):
        # Deterministic order: visit dict entries sorted by key.
        yield from chain(*[flatten(t) for k, t in sorted(ts.items(), key=lambda t: t[0])])
    else:
        yield ts
def unflatten(xs, structure):
    return _unflatten(xs, structure)[0]


def _unflatten(xs, structure):
    # Leaves (torch.Size or any non-container) consume exactly one element.
    if isinstance(structure, torch.Size):
        return xs[0], 1
    if not isinstance(structure, (list, tuple, set, dict)):
        return xs[0], 1
    if isinstance(structure, (list, tuple, set)):
        offset = 0
        elements = []
        for s in structure:
            e, n = _unflatten(xs[offset:], s)
            elements.append(e)
            offset += n
        return type(structure)(elements), offset
    assert isinstance(structure, dict)
    offset = 0
    elements = dict()
    # Same sorted-key order as flatten().
    for k, v in sorted(structure.items(), key=lambda t: t[0]):
        e, n = _unflatten(xs[offset:], v)
        elements[k] = e
        offset += n
    return elements, offset
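# --- Usage sketch (editor's addition): flatten/unflatten are inverses given a
# structure descriptor. Any non-container leaf (e.g. the integer 1) consumes
# exactly one element, which is why the partitions above can use
# input_structure = [1] or [1, 1] to split their *args.
def _demo_flatten_unflatten():
    structure = {'b': [1, 1], 'a': 1}
    flat = [torch.randn(2), torch.randn(3), torch.randn(4)]
    nested = unflatten(flat, structure)  # {'a': flat[0], 'b': [flat[1], flat[2]]}
    assert list(flatten(nested)) == flat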
def state_dict(partition, *args, **kwargs):
    # Translate internal names (l_0.weight, p_0, ...) back to the original
    # model's names using the partition's lookup table.
    state = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    result = dict()
    for k, v in state.items():
        if k in lookup:
            result[lookup[k]] = v
        else:
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            result[new_k] = v
    return result
def load_state_dict(partition, state, strict=True):
    # Translate the original model's names back to internal ones and move the
    # tensors to the partition's device.
    reverse_lookup = {v: k for k, v in partition.lookup.items()}
    device = partition.device
    keys = list(partition.state_dict(None).keys())
    new_state = dict()
    for k in keys:
        if k in reverse_lookup:
            new_state[reverse_lookup[k]] = state[k].to(device)
            continue
        idx = k.rfind('.')
        to_replace = k[:idx]
        if to_replace in reverse_lookup:
            key = reverse_lookup[to_replace] + k[idx:]
            new_state[key] = state[k].to(device)
    nn.Module.load_state_dict(partition, new_state, strict=strict)
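# --- Usage sketch (editor's addition): thanks to the lookup translation, a
# partition's state dict is keyed by the *original* model's parameter names,
# so checkpoints are interchangeable with the unpartitioned model. The p0
# argument is a Partition0 instance such as the one built in the earlier sketch.
def _demo_state_dict_translation(p0):
    sd = p0.state_dict()           # keys like 'conv1.weight', 'block1.layer.0.bn1.weight'
    assert 'l_0.weight' not in sd  # internal names never leak out
    p0.load_state_dict(sd)         # keys translated back to 'l_0.weight' internally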
def named_buffers(partition, prefix='', recurse=True):
    params = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
    lookup = partition.lookup
    for k, v in params:
        if k in lookup:
            yield lookup[k], v
        else:
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            yield new_k, v


def named_parameters(partition, prefix='', recurse=True):
    params = nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse)
    lookup = partition.lookup
    for k, v in params:
        if k in lookup:
            yield lookup[k], v
        else:
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            yield new_k, v


def cpu(partition):
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)


def cuda(partition, device=None):
    if device is None:
        device = torch.cuda.current_device()
    partition.device = torch.device(device)
    return nn.Module.cuda(partition, partition.device)


def to(partition, *args, **kwargs):
    # Track the target device (when one is given) before delegating.
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        if isinstance(args[0], (torch.device, int, str)):
            device = args[0]
        if torch.is_tensor(args[0]):
            device = args[0].device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=256):
    config = {
        'batch_dim': 0,
        'depth': 10000,
        'basic_blocks': (Conv2d, Linear, AvgPool2d, Dropout, ReLU, GroupNorm),
        'model_inputs': {
            'input0': {'shape': torch.Size([256, 3, 32, 32]), 'dtype': torch.float32,
                       'is_batched': True, 'used_by': [0]}},
        'model_outputs': {
            'WideResNet/Linear[fc]': {'shape': torch.Size([256, 100]), 'dtype': torch.float32,
                                      'is_batched': True, 'created_by': 3}},
        'stages': {
            0: {'stage_cls': Partition0,
                'inputs': {
                    'input0': {'shape': torch.Size([256, 3, 32, 32]), 'dtype': torch.float32,
                               'req_grad': False, 'is_batched': True, 'created_by': -1}},
                'outputs': {
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/torch::add_18':
                        {'shape': torch.Size([256, 160, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [1]},
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Dropout[dropout]':
                        {'shape': torch.Size([256, 160, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [1]}},
                'devices': ['cpu' if DEBUG else 'cuda:0'],
                'stage_depth': 3},
            1: {'stage_cls': Partition1,
                'inputs': {
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/torch::add_18':
                        {'shape': torch.Size([256, 160, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 0},
                    'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Dropout[dropout]':
                        {'shape': torch.Size([256, 160, 32, 32]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 0}},
                'outputs': {
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [2]},
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [2]}},
                'devices': ['cpu' if DEBUG else 'cuda:1'],
                'stage_depth': 2},
            2: {'stage_cls': Partition2,
                'inputs': {
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 1},
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 1}},
                'outputs': {
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/torch::add_59':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [3]},
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'used_by': [3]}},
                'devices': ['cpu' if DEBUG else 'cuda:2'],
                'stage_depth': 1},
            3: {'stage_cls': Partition3,
                'inputs': {
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/torch::add_59':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 2},
                    'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]':
                        {'shape': torch.Size([256, 320, 16, 16]), 'dtype': torch.float32,
                         'req_grad': True, 'is_batched': True, 'created_by': 2}},
                'outputs': {
                    'WideResNet/Linear[fc]': {'shape': torch.Size([256, 100]), 'dtype': torch.float32,
                                              'req_grad': True, 'is_batched': True, 'used_by': [-1]}},
                'devices': ['cpu' if DEBUG else 'cuda:3'],
                'stage_depth': 0}}}

    # Overwrite the traced batch dimension with the requested batch size.
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])
    return config
class Partition0(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Dropout[dropout]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        super().__init__()
        # Register the traced layers under short internal names l_0 .. l_21.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1]
        self.lookup = {'l_0': 'conv1',
                       'l_1': 'block1.layer.0.bn1', 'l_2': 'block1.layer.0.relu1',
                       'l_3': 'block1.layer.0.conv1', 'l_4': 'block1.layer.0.bn2',
                       'l_5': 'block1.layer.0.relu2', 'l_6': 'block1.layer.0.dropout',
                       'l_7': 'block1.layer.0.conv2', 'l_8': 'block1.layer.0.convShortcut',
                       'l_9': 'block1.layer.1.bn1', 'l_10': 'block1.layer.1.relu1',
                       'l_11': 'block1.layer.1.conv1', 'l_12': 'block1.layer.1.bn2',
                       'l_13': 'block1.layer.1.relu2', 'l_14': 'block1.layer.1.dropout',
                       'l_15': 'block1.layer.1.conv2',
                       'l_16': 'block1.layer.2.bn1', 'l_17': 'block1.layer.2.relu1',
                       'l_18': 'block1.layer.2.conv1', 'l_19': 'block1.layer.2.bn2',
                       'l_20': 'block1.layer.2.relu2', 'l_21': 'block1.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        x0 = unflatten(args, self.input_structure)[0]
        t_0 = self.l_0(x0)
        # block1, BasicBlock[0] (with convolutional shortcut)
        t_0 = self.l_1(t_0)
        t_0 = self.l_2(t_0)
        t_1 = self.l_3(t_0)
        t_0 = self.l_8(t_0)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = torch.add(t_0, t_1)
        # block1, BasicBlock[1]
        t_0 = self.l_9(t_1)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = torch.add(t_1, t_0)
        # block1, BasicBlock[2] (cut mid-block; its conv2 runs in stage 1)
        t_1 = self.l_16(t_0)
        t_1 = self.l_17(t_1)
        t_1 = self.l_18(t_1)
        t_1 = self.l_19(t_1)
        t_1 = self.l_20(t_1)
        t_1 = self.l_21(t_1)
        # Two outputs: the residual input t_0 and the dropout activation t_1.
        return list(flatten((t_0, t_1)))

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {'l_0': 'block1.layer.2.conv2',
                       'l_1': 'block1.layer.3.bn1', 'l_2': 'block1.layer.3.relu1',
                       'l_3': 'block1.layer.3.conv1', 'l_4': 'block1.layer.3.bn2',
                       'l_5': 'block1.layer.3.relu2', 'l_6': 'block1.layer.3.dropout',
                       'l_7': 'block1.layer.3.conv2',
                       'l_8': 'block2.layer.0.bn1', 'l_9': 'block2.layer.0.relu1',
                       'l_10': 'block2.layer.0.conv1', 'l_11': 'block2.layer.0.bn2',
                       'l_12': 'block2.layer.0.relu2', 'l_13': 'block2.layer.0.dropout',
                       'l_14': 'block2.layer.0.conv2', 'l_15': 'block2.layer.0.convShortcut'}
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        # Finish block1 BasicBlock[2]: conv2 plus the residual add.
        t_0 = self.l_0(x1)
        t_0 = torch.add(x0, t_0)
        # block1, BasicBlock[3]
        t_1 = self.l_1(t_0)
        t_1 = self.l_2(t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = torch.add(t_0, t_1)
        # block2, BasicBlock[0] (its residual add happens in stage 2)
        t_1 = self.l_8(t_1)
        t_1 = self.l_9(t_1)
        t_0 = self.l_10(t_1)
        t_1 = self.l_15(t_1)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        return list(flatten((t_1, t_0)))

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[2]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {'l_0': 'block2.layer.1.bn1', 'l_1': 'block2.layer.1.relu1',
                       'l_2': 'block2.layer.1.conv1', 'l_3': 'block2.layer.1.bn2',
                       'l_4': 'block2.layer.1.relu2', 'l_5': 'block2.layer.1.dropout',
                       'l_6': 'block2.layer.1.conv2',
                       'l_7': 'block2.layer.2.bn1', 'l_8': 'block2.layer.2.relu1',
                       'l_9': 'block2.layer.2.conv1', 'l_10': 'block2.layer.2.bn2',
                       'l_11': 'block2.layer.2.relu2', 'l_12': 'block2.layer.2.dropout',
                       'l_13': 'block2.layer.2.conv2',
                       'l_14': 'block2.layer.3.bn1', 'l_15': 'block2.layer.3.relu1',
                       'l_16': 'block2.layer.3.conv1', 'l_17': 'block2.layer.3.bn2',
                       'l_18': 'block2.layer.3.relu2', 'l_19': 'block2.layer.3.dropout',
                       'l_20': 'block2.layer.3.conv2'}
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        # Finish block2 BasicBlock[0]: add shortcut and conv2 branches.
        t_0 = torch.add(x0, x1)
        # block2, BasicBlock[1]
        t_1 = self.l_0(t_0)
        t_1 = self.l_1(t_1)
        t_1 = self.l_2(t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = torch.add(t_0, t_1)
        # block2, BasicBlock[2]
        t_0 = self.l_7(t_1)
        t_0 = self.l_8(t_0)
        t_0 = self.l_9(t_0)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = torch.add(t_1, t_0)
        # block2, BasicBlock[3] (its residual add happens in stage 3)
        t_1 = self.l_14(t_0)
        t_1 = self.l_15(t_1)
        t_1 = self.l_16(t_1)
        t_1 = self.l_17(t_1)
        t_1 = self.l_18(t_1)
        t_1 = self.l_19(t_1)
        t_1 = self.l_20(t_1)
        return list(flatten((t_0, t_1)))

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module):
    LAYER_SCOPES = [
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[1]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[2]/Conv2d[conv2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/ReLU[relu1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/Conv2d[conv1]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/ReLU[relu2]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/Dropout[dropout]',
        'WideResNet/NetworkBlock[block3]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]',
        'WideResNet/GroupNorm[bn1]',
        'WideResNet/ReLU[relu]',
        'WideResNet/AvgPool2d[avg_pool]',
        'WideResNet/Linear[fc]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {'l_0': 'block3.layer.0.bn1', 'l_1': 'block3.layer.0.relu1',
                       'l_2': 'block3.layer.0.conv1', 'l_3': 'block3.layer.0.bn2',
                       'l_4': 'block3.layer.0.relu2', 'l_5': 'block3.layer.0.dropout',
                       'l_6': 'block3.layer.0.conv2', 'l_7': 'block3.layer.0.convShortcut',
                       'l_8': 'block3.layer.1.bn1', 'l_9': 'block3.layer.1.relu1',
                       'l_10': 'block3.layer.1.conv1', 'l_11': 'block3.layer.1.bn2',
                       'l_12': 'block3.layer.1.relu2', 'l_13': 'block3.layer.1.dropout',
                       'l_14': 'block3.layer.1.conv2',
                       'l_15': 'block3.layer.2.bn1', 'l_16': 'block3.layer.2.relu1',
                       'l_17': 'block3.layer.2.conv1', 'l_18': 'block3.layer.2.bn2',
                       'l_19': 'block3.layer.2.relu2', 'l_20': 'block3.layer.2.dropout',
                       'l_21': 'block3.layer.2.conv2',
                       'l_22': 'block3.layer.3.bn1', 'l_23': 'block3.layer.3.relu1',
                       'l_24': 'block3.layer.3.conv1', 'l_25': 'block3.layer.3.bn2',
                       'l_26': 'block3.layer.3.relu2', 'l_27': 'block3.layer.3.dropout',
                       'l_28': 'block3.layer.3.conv2',
                       'l_29': 'bn1', 'l_30': 'relu', 'l_31': 'avg_pool', 'l_32': 'fc'}
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        # Finish block2 BasicBlock[3]: the residual add.
        t_0 = torch.add(x0, x1)
        # block3, BasicBlock[0] (with convolutional shortcut)
        t_0 = self.l_0(t_0)
        t_0 = self.l_1(t_0)
        t_1 = self.l_2(t_0)
        t_0 = self.l_7(t_0)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = torch.add(t_0, t_1)
        # block3, BasicBlock[1]
        t_0 = self.l_8(t_1)
        t_0 = self.l_9(t_0)
        t_0 = self.l_10(t_0)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        t_0 = torch.add(t_1, t_0)
        # block3, BasicBlock[2]
        t_1 = self.l_15(t_0)
        t_1 = self.l_16(t_1)
        t_1 = self.l_17(t_1)
        t_1 = self.l_18(t_1)
        t_1 = self.l_19(t_1)
        t_1 = self.l_20(t_1)
        t_1 = self.l_21(t_1)
        t_1 = torch.add(t_0, t_1)
        # block3, BasicBlock[3]
        t_0 = self.l_22(t_1)
        t_0 = self.l_23(t_0)
        t_0 = self.l_24(t_0)
        t_0 = self.l_25(t_0)
        t_0 = self.l_26(t_0)
        t_0 = self.l_27(t_0)
        t_0 = self.l_28(t_0)
        t_0 = torch.add(t_1, t_0)
        # head
        t_0 = self.l_29(t_0)
        t_0 = self.l_30(t_0)
        t_0 = self.l_31(t_0)
        t_0 = t_0.view(-1, 640)
        t_0 = self.l_32(t_0)
        return (t_0,)

    def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs)
    def cpu(self): return cpu(self)
    def cuda(self, device=None): return cuda(self, device=device)
    def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def parse_json_config(args, config=None, first=False):
    if config is None:
        config = args.config
    if not os.path.exists(config):
        raise ValueError(f'Config {config} does not exist')
    with open(config, 'r') as f:
        output = json.load(f)

    def fix_base_cfg_path(base_config_path, is_relative):
        if is_relative:
            return os.path.join(os.path.dirname(config), base_config_path)
        return base_config_path

    if first and args.base_config_path:
        output['base_config_path'] = args.base_config_path
    # Recursively load base config(s) first, so the current file overrides them.
    if 'base_config_path' in output:
        base_config_path = output.get('base_config_path')
        is_relative = output.get('base_config_path_is_relative', True)
        if isinstance(base_config_path, list):
            for i in base_config_path:
                parse_json_config(args, config=fix_base_cfg_path(i, is_relative))
        else:
            parse_json_config(args, config=fix_base_cfg_path(base_config_path, is_relative))
        if args.base_config_path != base_config_path:
            warnings.warn('Config path changed by child')
    add_parsed_config_to_args(args, output)
def add_parsed_config_to_args(args, output: dict):
    for key, value in output.items():
        # Respect values explicitly given on the command line.
        if output.get(f'{key}_from_cmd', False) or getattr(args, f'{key}_from_cmd', False):
            if not hasattr(args, key):
                raise RuntimeError(f'-W- {key}_from_cmd=True but not set')
            continue
        if key.endswith('_from_cmd') and hasattr(args, key) and getattr(args, key):
            warnings.warn(f'Taking {key} from cmd_args instead of overriding from json')
            continue
        setattr(args, key, value)
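# --- Usage sketch (editor's addition): config inheritance end to end. A child
# JSON points at a base JSON via base_config_path; the base is applied first,
# then the child overrides it. The temporary files exist only for this demo.
def _demo_parse_json_config():
    import argparse
    import tempfile
    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'base.json'), 'w') as f:
        json.dump({'lr': 0.1, 'epochs': 200}, f)
    with open(os.path.join(d, 'exp.json'), 'w') as f:
        json.dump({'base_config_path': 'base.json', 'lr': 0.01}, f)
    args = argparse.Namespace(config=os.path.join(d, 'exp.json'),
                              base_config_path='base.json')
    parse_json_config(args, first=True)
    print(args.lr, args.epochs)  # 0.01 200  (child overrides base)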
def empty_config():
    return ml_collections.ConfigDict()
def _convert_dict_to_python_config(dd, prefix='config'):
    print(f'{prefix} = ConfigDict()')
    for i in dd:
        if isinstance(dd[i], str):
            print(f"{prefix}.{i} = '{dd[i]}'")
        elif isinstance(dd[i], dict):
            _convert_dict_to_python_config(dd[i], prefix=f'{prefix}.{i}')
        else:
            print(f'{prefix}.{i} = {dd[i]}')
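# --- Usage sketch (editor's addition): feeding a nested dict prints the
# equivalent ConfigDict-building code.
def _demo_convert_dict():
    _convert_dict_to_python_config({'optimizer': {'type': 'sgd', 'lr': 0.1}, 'epochs': 200})
    # config = ConfigDict()
    # config.optimizer = ConfigDict()
    # config.optimizer.type = 'sgd'
    # config.optimizer.lr = 0.1
    # config.epochs = 200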
def _convert_json_to_to_python_config(path):
    with open(path, 'r') as f:
        dd = json.load(f)
    _convert_dict_to_python_config(dd)
def get_all_options_config():
    """Simply converting all options to ml_collections.ConfigDict. Autogenerated."""
    config = ml_collections.ConfigDict()
    config.logdir = 'logs/'
    config.out_dir = 'results/'
    config.out_filename = 'wrn_16x4_p4_msnag_clone_ga_no_wd'
    config.distributed_backend = 'mpi'
    config.data_propagator = 'auto'
    config.statistics = 'cv'
    config.model = 'wrn_16x4_p4'
    config.dataset = 'cifar10'
    config.trainer = ml_collections.ConfigDict()
    config.trainer.type = 'cv'
    config.trainer.args = ml_collections.ConfigDict()
    config.trainer.args.max_grad_norm = 0.25
    config.trainer.args.always_calc_grad_norm = True
    config.bs_train = 128
    config.bs_test = 200
    config.num_data_workers = 6
    config.optimizer = ml_collections.ConfigDict()
    config.optimizer.type = 'sgd1'
    config.optimizer.args = ml_collections.ConfigDict()
    config.optimizer.args.lr = 0.1
    config.optimizer.args.weight_decay = 0.0005
    config.optimizer.args.momentum = 0.9
    config.optimizer.args.nesterov = False
    config.lr_scheduler = ml_collections.ConfigDict()
    config.lr_scheduler.type = 'get_multi_step_lr_schedule_with_warmup'
    config.lr_scheduler.preproc_args = ml_collections.ConfigDict()
    config.lr_scheduler.preproc_args.num_training_steps = 'epochs_to_steps'
    config.lr_scheduler.args = ml_collections.ConfigDict()
    config.lr_scheduler.args.num_warmup_steps = 5
    config.lr_scheduler.args.milestones = [60, 120, 160]
    config.lr_scheduler.args.gamma = 0.2
    config.lr_scheduler.args.last_epoch = -1
    config.weight_prediction = ml_collections.ConfigDict()
    config.weight_prediction.type = 'msnag'
    config.weight_prediction.args = ml_collections.ConfigDict()
    config.weight_prediction.args.pred_mem = 'clone'
    config.weight_prediction.args.nag_with_predictor = True
    config.weight_prediction.args.sched_aware = False
    config.gap_aware = ml_collections.ConfigDict()
    config.gap_aware.type = 'sgd1'
    config.gap_aware.policy = 'almost_last_partition'
    config.gap_aware.args = ml_collections.ConfigDict()
    config.gap_aware.args.big_gamma = 0.999
    config.gap_aware.args.epsilon = 1e-08
    config.epochs = 200
    config.steps = -1
    config.seed = 42
    config.num_chunks = 4
    config.verbose_comm = True
    config.flush_rate = -1
    config.train_batches_limit = -1
    config.test_batches_limit = -1
    config.weight_stashing = True
    config.work_scheduler = '1F1B'
    config.cpu = False
    config.seed_from_cmd = True
    config.bs_train_from_cmd = False
    config.auto_file_name = True
    config.stage_to_device_map = [0, 0, 0, 0]
    config.step_every = 1
    config.log_frequency = 100
    config.ddp_sim_num_gpus = 4
    config.ddp = True
    config.cudnn_benchmark = True
    config.keep_buffers_alive = True
    config.no_recomputation = True
    config.nesterov_set_for_last_partition = True
    config.max_buffers = 1
    config.gap_aware_just_loss = True
    config.base_config_path = 'configs/dummy_base.json'
    config.base_config_path_is_relative = True
    config.model_name_or_path = 'gpt2'
    config.overwrite_cache = False
    config.train_seq_len = 1024
    config.valid_seq_len = 1024
    config.test_seq_len = 1024
    config.dont_drop_last = True
    config.checkpoints_save_dir = 'results/saved_checkpoints'
    config.checkpoints_save_name_prefix = 'my_checkpoint_prefix'
    return config
def get_t5_t5_3b_p8_virtual_stages_boolq_common_config():
    """Simplest config. Autogenerated from json file."""
    config = ml_collections.ConfigDict()
    config.logdir = 'logs/t5/virtual_stages/'
    config.data_dir = '/home_local/saareliad/data'
    config.out_dir = 'results/t5/super_glue/boolq'
    config.auto_file_name = True
    config.out_filename = 'test_vs'
    config.distributed_backend = 'mpi'
    config.model = 't5_3b_tied_lmheads_512_4_8p_bw12_squad1_virtual_stages'
    config.stage_to_device_map = [0, 1, 2, 3, 4, 5, 6, 7, 6, 0, 5, 3, 2, 4, 1, 7]
    config.nprocs = 16
    config.dataset = 't5_tfds'
    config.mixture_or_task_name = 'super_glue_boolq_v102'
    config.preproc_batch_size = 128
    config.trainer = ml_collections.ConfigDict()
    config.trainer.type = 't5'
    config.trainer.args = ml_collections.ConfigDict()
    config.trainer.args.always_calc_grad_norm = False
    config.trainer.args.loss_multiplier = 2.59
    config.statistics = 'squad_loss_per_batch'
    config.step_every = 5
    config.bs_train = 4
    config.bs_test = 4
    config.max_seq_length = 512
    config.answer_max_seq_length = 4
    config.num_data_workers = 5
    config.optimizer = ml_collections.ConfigDict()
    config.optimizer.type = 'adafactor'
    config.optimizer.args = ml_collections.ConfigDict()
    config.optimizer.args.lr = 0.001
    config.optimizer.args.weight_decay = 0
    config.optimizer.args.scale_parameter = True
    config.optimizer.args.relative_step = False
    config.lr_scheduler = ml_collections.ConfigDict()
    config.lr_scheduler.type = 'get_constant_schedule_with_warmup'
    config.lr_scheduler.preproc_args = ml_collections.ConfigDict()
    config.lr_scheduler.args = ml_collections.ConfigDict()
    config.lr_scheduler.args.num_warmup_steps = 200
    config.lr_scheduler.args.last_epoch = -1
    config.epochs = -1
    config.steps = 3200
    config.seed_from_cmd = False
    config.seed = 42
    config.bs_train_from_cmd = False
    config.bs_test_from_cmd = False
    config.num_chunks = 1
    config.verbose_comm = False
    config.flush_rate = -1
    config.work_scheduler = 'virtual_stages_1f1b'
    config.supremum_staleness = 100
    config.cudnn_benchmark = True
    config.max_buffers = 1
    config.keep_buffers_alive = False
    config.train_batches_limit = -1
    config.log_frequency = 200
    config.model_name_or_path = 't5-3b'
    config.do_lower_case = True
    config.overwrite_cache = False
    config.dont_drop_last = True
    config.model_type = 't5'
    config.precomputed_masks = True
    config.save_checkpoints = True
    config.checkpoints_save_name_prefix = 'stale_adafactor'
    config.checkpoints_save_dir = '/nfs_Disk2/virtual_stages/checkpoints/t5/3b/boolq/stale/'
    config.load_model_one_by_one = False
    return config
def is_explicit_non_seperated_dataset(args):
    return '_nonsep' in args.data_propagator
def get_dataloaders(args, pipe_config: Optional[PipelineConfig] = None,
                    dataset_keywords: Optional[Dict[str, Any]] = None):
    if dataset_keywords is None:
        dataset_keywords = dict()
    if not is_explicit_non_seperated_dataset(args):
        train_dl, test_dl, samplers, extra = get_separate_dls_from_args(
            args, pipe_config=pipe_config, verbose=False,
            dataset_keywords=dataset_keywords,
            shuffle_train=getattr(args, 'shuffle_train', True))
    else:
        raise NotImplementedError('now deprecated')
    return train_dl, test_dl, samplers, extra
def _get_separated_dataset(just, DATA_DIR, args, **dataset_keywords):
    if just is None:
        return TensorDataset(), TensorDataset()
    return (Dataset(**args.cep_dataset_kwargs, just=just),
            Dataset(**args.cep_dataset_kwargs, just=just))
class SEP_CEP_DatasetHandler(CommonDatasetHandler):
    def __init__(self, **kw):
        super().__init__()
        train_ds, test_ds = _get_separated_dataset(**kw)
        self.train_ds = train_ds
        self.test_ds = test_ds

    def get_train_ds(self, **kw):
        return self.train_ds

    def get_test_ds(self, **kw):
        return self.test_ds

    def get_validation_ds(self, **kw):
        raise NotImplementedError()

    def get_modify_trainer_fn(self):
        pass

    def modify_dataloader_keywords(self, dataloader_keywords):
        return dataloader_keywords
def cifar_transformations(mean, std):
    train_transform = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4),
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std)])
    test_transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std)])
    return train_transform, test_transform
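# --- Usage sketch (editor's addition): building transforms with the CIFAR-10
# statistics defined just below; the all-zero image stands in for a real sample.
def _demo_cifar_transforms():
    mean = np.array([0.49139968, 0.48215841, 0.44653091])
    std = np.array([0.24703223, 0.24348513, 0.26158784])
    train_tf, test_tf = cifar_transformations(mean, std)
    img = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    x = train_tf(img)
    print(x.shape)  # torch.Size([3, 32, 32])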
def cifar10_transformations():
    mean = np.array([0.49139968, 0.48215841, 0.44653091])
    std = np.array([0.24703223, 0.24348513, 0.26158784])
    train_transform, test_transform = cifar_transformations(mean, std)
    return train_transform, test_transform


def cifar100_transformations():
    mean = np.array([0.5071, 0.4867, 0.4408])
    std = np.array([0.2675, 0.2565, 0.2761])
    train_transform, test_transform = cifar_transformations(mean, std)
    return train_transform, test_transform
def imagenet_transformations():
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize])
    return train_transform, test_transform
def get_cifar_10_train_test_ds(DATA_DIR=DEFAULT_DATA_DIR):
    train_transform, test_transform = cifar10_transformations()
    ds_train = CIFAR10(root=DATA_DIR, download=DOWNLOAD, train=True, transform=train_transform)
    ds_test = CIFAR10(root=DATA_DIR, download=DOWNLOAD, train=False, transform=test_transform)
    return ds_train, ds_test


def get_imagenet_train_test_ds(DATA_DIR=IMAGENET_ROOT_DIR):
    train_transform, test_transform = imagenet_transformations()
    traindir = os.path.join(DATA_DIR, 'train')
    valdir = os.path.join(DATA_DIR, 'val')
    ds_train = ImageFolder(traindir, transform=train_transform)
    ds_test = ImageFolder(valdir, transform=test_transform)
    return ds_train, ds_test


def get_cifar_100_train_test_ds(DATA_DIR=DEFAULT_DATA_DIR):
    train_transform, test_transform = cifar100_transformations()
    ds_train = CIFAR100(root=DATA_DIR, download=DOWNLOAD, train=True, transform=train_transform)
    ds_test = CIFAR100(root=DATA_DIR, download=DOWNLOAD, train=False, transform=test_transform)
    return ds_train, ds_test
def get_cv_train_test_dl(ds_train, ds_test, bs_train, bs_test,
                         shuffle=True, pin_memory=True, **kw):
    dl_train = torch.utils.data.DataLoader(ds_train, bs_train, shuffle=shuffle,
                                           pin_memory=pin_memory, **kw)
    dl_test = torch.utils.data.DataLoader(ds_test, bs_test, shuffle=False,
                                          pin_memory=pin_memory, **kw)
    return dl_train, dl_test
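# --- Usage sketch (editor's addition): wiring the dataset getters to the
# loader helper. Downloads CIFAR-10 into DATA_DIR when DOWNLOAD is enabled.
def _demo_cifar10_loaders():
    ds_train, ds_test = get_cifar_10_train_test_ds(DATA_DIR='./data')
    dl_train, dl_test = get_cv_train_test_dl(ds_train, ds_test,
                                             bs_train=128, bs_test=200,
                                             num_workers=4)
    xb, yb = next(iter(dl_train))
    print(xb.shape, yb.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])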
class DatasetFolderJustX(DatasetFolder):
    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            sample
        """
        path, _ = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample


class DatasetFolderJustY(DatasetFolder):
    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            target where target is class_index of the target class.
        """
        _, target = self.samples[index]
        if self.target_transform is not None:
            target = self.target_transform(target)
        return target
class ImageFolderJustX(DatasetFolderJustX): 'A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n ' def __init__(self, root, transform=None, target_transform=None, loader=default_loader, is_valid_file=None): super().__init__(root, loader, (IMG_EXTENSIONS if (is_valid_file is None) else None), transform=transform, target_transform=target_transform, is_valid_file=is_valid_file) self.imgs = self.samples
class ImageFolderJustY(DatasetFolderJustY):
    """A generic data loader where the images are arranged in this way: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png

    Unlike ``ImageFolder``, ``__getitem__`` returns only the class index
    (target), not a (sample, target) tuple.

    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes the path of an Image
            file and checks whether it is a valid file (used to filter out corrupt files).

    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, is_valid_file=None):
        super().__init__(root, loader,
                         IMG_EXTENSIONS if is_valid_file is None else None,
                         transform=transform,
                         target_transform=target_transform,
                         is_valid_file=is_valid_file)
        self.imgs = self.samples
class CIFAR10JustX(CIFAR10):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            img: the (optionally transformed) image, without its target.
        """
        img = self.data[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        return img
class CIFAR10JustY(CIFAR10):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            target: the class_index of the target class, without the image.
        """
        target = self.targets[index]
        if self.target_transform is not None:
            target = self.target_transform(target)
        return target
class CIFAR100JustX(CIFAR10JustX): '`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.\n\n This is a subclass of the `CIFAR10` Dataset.\n ' base_folder = 'cifar-100-python' url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' filename = 'cifar-100-python.tar.gz' tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']] test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']] meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
class CIFAR100JustY(CIFAR10JustY): '`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.\n\n This is a subclass of the `CIFAR10` Dataset.\n ' base_folder = 'cifar-100-python' url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' filename = 'cifar-100-python.tar.gz' tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']] test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']] meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
def get_cifar_100_just_x_or_y_ds(transform, train, **kw): just = kw['just'] DATA_DIR = kw.get('DATA_DIR', DEFAULT_DATA_DIR) just = just.lower() if (just == 'x'): ds_X = CIFAR100JustX(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform) return ds_X elif (just == 'y'): ds_Y = CIFAR100JustY(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform) return ds_Y else: raise ValueError(f"'just' should be in x,y. Got {just} instead.")
class SEP_CIFAR100_DatasetHandler(CommonDatasetHandler):
    def __init__(self, **kw):
        super().__init__()

    def get_train_ds(self, **kw):
        train_transform, _ = cifar100_transformations()
        return get_cifar_100_just_x_or_y_ds(train_transform, train=True, **kw)

    def get_test_ds(self, *args, **kw):
        _, test_transform = cifar100_transformations()
        return get_cifar_100_just_x_or_y_ds(test_transform, train=False, **kw)

    def get_validation_ds(self, **kw):
        raise NotImplementedError()
def get_cifar_10_just_x_or_y_ds(transform, train, **kw): just = kw['just'] DATA_DIR = kw.get('DATA_DIR', DEFAULT_DATA_DIR) if (not isinstance(just, str)): raise ValueError(f"'just' should be in x,y. Got {just} instead.") just = just.lower() if (just == 'x'): ds_X = CIFAR10JustX(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform) return ds_X elif (just == 'y'): ds_Y = CIFAR10JustY(root=DATA_DIR, download=DOWNLOAD, train=train, transform=transform) return ds_Y else: raise ValueError(f"'just' should be in x,y. Got {just} instead.")
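# Hedged sketch of the split-dataset idea these classes implement: in a
# pipelined model, the stage holding the first partition loads only inputs
# ('x') while the stage computing the loss loads only labels ('y'), so neither
# stage materializes data it never uses. The transform choices below are
# assumptions for illustration.
def _example_cifar10_split_by_stage():
    train_transform, _ = cifar10_transformations()
    ds_inputs = get_cifar_10_just_x_or_y_ds(train_transform, train=True, just='x')
    ds_labels = get_cifar_10_just_x_or_y_ds(None, train=True, just='y')
    return ds_inputs, ds_labels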
class SEP_CIFAR10_DatasetHandler(CommonDatasetHandler):
    def __init__(self, **kw):
        super().__init__()

    def get_train_ds(self, **kw):
        train_transform, _ = cifar10_transformations()
        return get_cifar_10_just_x_or_y_ds(transform=train_transform, train=True, **kw)

    def get_test_ds(self, **kw):
        _, test_transform = cifar10_transformations()
        return get_cifar_10_just_x_or_y_ds(transform=test_transform, train=False, **kw)

    def get_validation_ds(self, **kw):
        raise NotImplementedError()
def get_imagenet_just_x_or_y_ds(transform, train, **kw): just = kw['just'] DATA_DIR = kw.get('DATA_DIR', DEFAULT_DATA_DIR) just = just.lower() data_dir = (os.path.join(DATA_DIR, 'val') if (not train) else os.path.join(DATA_DIR, 'train')) if (just == 'x'): ds_X = ImageFolderJustX(data_dir, transform=transform) return ds_X elif (just == 'y'): ds_Y = ImageFolderJustY(data_dir, transform=transform, target_transform=None) return ds_Y else: raise ValueError(f"'just' should be in x,y. Got {just} instead.")
class SEP_IMAGENET_DatasetHandler(CommonDatasetHandler):
    def __init__(self, **kw):
        super().__init__()

    def get_train_ds(self, **kw):
        train_transform, _ = imagenet_transformations()
        return get_imagenet_just_x_or_y_ds(transform=train_transform, train=True, **kw)

    def get_test_ds(self, **kw):
        _, test_transform = imagenet_transformations()
        return get_imagenet_just_x_or_y_ds(transform=test_transform, train=False, **kw)

    def get_validation_ds(self, **kw):
        raise NotImplementedError()
class CommonDatasetHandler(abc.ABC):
    def __init__(self):
        pass

    def get_train_ds(self, *args, **kw):
        raise NotImplementedError()

    def get_test_ds(self, *args, **kw):
        # Optional: returning None signals the split is unavailable
        # (get_separate_dls_from_args handles a None test dataset).
        return None

    def get_validation_ds(self, *args, **kw):
        # Optional, see get_test_ds.
        return None

    def get_modify_trainer_fn(self):
        pass

    def modify_dataloader_keywords(self, dataloader_keywords):
        return dataloader_keywords
def register_dataset(name, common_handler: Type[CommonDatasetHandler]): AVAILABLE_DATASETS[name] = common_handler
def register_hardcoded_just_xy_dataset(name): HARDCODED_JUST_XY.add(name)
def register_dataset_func(name, get_train_ds, get_test_ds=None, get_validation_ds=None,
                          get_modify_trainer_fn=None, modify_dataloader_keywords=None):
    if get_train_ds is None and get_test_ds is None and get_validation_ds is None:
        raise ValueError('must provide at least one non-None dataset function')
    d = dict(get_train_ds=get_train_ds,
             get_test_ds=get_test_ds,
             get_validation_ds=get_validation_ds,
             get_modify_trainer_fn=get_modify_trainer_fn,
             modify_dataloader_keywords=modify_dataloader_keywords)
    for k, v in list(d.items()):
        if v is None:
            d.pop(k)
    # The handler is instantiated with keyword arguments (just=..., DATA_DIR=...,
    # args=...), so the generated class needs an __init__ that accepts them;
    # CommonDatasetHandler.__init__ does not.
    d['__init__'] = lambda self, **kw: None
    common_handler = type('AutoGeneratedDatasetHandler', (CommonDatasetHandler,), d)
    return register_dataset(name=name, common_handler=common_handler)
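# Hedged sketch of the functional registration path above. The dataset name
# 'my_cifar100_full' and the getter body are hypothetical; they only illustrate
# that each getter becomes a method on the generated handler (hence the leading
# `self`) and receives the handler keywords (just, DATA_DIR, ...).
def _example_register_dataset_func():
    def my_train_ds(self, **kw):
        train_transform, _ = cifar100_transformations()
        return get_cifar_100_just_x_or_y_ds(train_transform, train=True, **kw)

    register_dataset_func('my_cifar100_full', get_train_ds=my_train_ds)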
class MyNewDistributedSampler(DistributedSampler):
    # Keep the derived seed comfortably inside torch.Generator's valid range.
    MAX_INT = 2 ** 32

    def __init__(self, experiment_manual_seed, *args, **kw):
        super().__init__(*args, **kw)
        self.experiment_manual_seed = experiment_manual_seed

    def __iter__(self):
        # Re-seed deterministically per epoch, so every replica draws the same
        # permutation for a given (experiment seed, epoch) pair.
        g = torch.Generator()
        g.manual_seed(((1 + self.epoch) * self.experiment_manual_seed) % self.MAX_INT)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            # Fast path: a single replica without shuffling needs no padding or
            # subsampling at all.
            if self.num_replicas == 1 and self.rank == 0:
                return iter(range(len(self.dataset)))
            indices = list(range(len(self.dataset)))
        # Pad with a repeated prefix so the sample count divides evenly across
        # replicas, then take this rank's strided slice.
        indices += indices[:self.total_size - len(indices)]
        assert len(indices) == self.total_size
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
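# Illustrative check of the sampler's determinism (the toy dataset and seed are
# assumptions): for a fixed (experiment seed, epoch) pair the permutation is
# reproducible, and set_epoch() reseeds it between epochs.
def _example_sampler_determinism():
    ds = list(range(10))
    sampler = MyNewDistributedSampler(42, ds, num_replicas=1, rank=0, shuffle=True)
    sampler.set_epoch(0)
    first_epoch_order = list(sampler)
    sampler.set_epoch(0)
    assert list(sampler) == first_epoch_order  # same epoch -> same order
    sampler.set_epoch(1)
    # reseeded per epoch -> differs (with overwhelming probability)
    assert list(sampler) != first_epoch_order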
def download_file(url, DATA_DIR=DATA_DIR): local_filename = url.split('/')[(- 1)] local_filename = os.path.join(DATA_DIR, local_filename) if os.path.exists(local_filename): print(f'-I- file {local_filename} already exists, skipping download.') return local_filename with requests.get(url, stream=True) as r: r.raise_for_status() with open(local_filename, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) return local_filename
def download_wiki2():
    URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip'
    path_to_zip_file = download_file(URL)
    print(f'-I- Downloaded wikitext2 to {path_to_zip_file}. Extracting...')
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(DATA_DIR)
    print('-I- Done')

def download_wiki103():
    URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip'
    path_to_zip_file = download_file(URL)
    print(f'-I- Downloaded wikitext103 to {path_to_zip_file}. Extracting...')
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(DATA_DIR)
    print('-I- Done')
def download_squad(): ' Download Squad datasets ' script = f'''pushd {DATA_DIR} if [ ! -d squad1 ] ; then mkdir squad1 cd squad1 wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json wget https://raw.githubusercontent.com/allenai/bi-att-flow/master/squad/evaluate-v1.1.py cd .. fi if [ ! -d squad2 ] ; then mkdir squad2 cd squad2 wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json curl https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ > evaluate-v2.0.py cd .. fi popd ''' os.system(("bash -c '%s'" % script))
def download_glue(): data_dir = os.path.join(DATA_DIR, 'glue_data') subprocess.call(['python', 'data/download/download_glue_data.py', '--data_dir', data_dir, '--tasks', 'all'])
def download_and_extract(task, data_dir): print(('Downloading and extracting %s...' % task)) data_file = ('%s.zip' % task) urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print('\tCompleted!')
def format_mrpc(data_dir, path_to_data): print('Processing MRPC...') mrpc_dir = os.path.join(data_dir, 'MRPC') if (not os.path.isdir(mrpc_dir)): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, 'msr_paraphrase_train.txt') mrpc_test_file = os.path.join(path_to_data, 'msr_paraphrase_test.txt') else: print(('Local MRPC data not specified, downloading data from %s' % MRPC_TRAIN)) mrpc_train_file = os.path.join(mrpc_dir, 'msr_paraphrase_train.txt') mrpc_test_file = os.path.join(mrpc_dir, 'msr_paraphrase_test.txt') urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) assert os.path.isfile(mrpc_train_file), ('Train data not found at %s' % mrpc_train_file) assert os.path.isfile(mrpc_test_file), ('Test data not found at %s' % mrpc_test_file) urllib.request.urlretrieve(TASK2PATH['MRPC'], os.path.join(mrpc_dir, 'dev_ids.tsv')) dev_ids = [] with open(os.path.join(mrpc_dir, 'dev_ids.tsv'), encoding='utf8') as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split('\t')) with open(mrpc_train_file, encoding='utf8') as data_fh, open(os.path.join(mrpc_dir, 'train.tsv'), 'w', encoding='utf8') as train_fh, open(os.path.join(mrpc_dir, 'dev.tsv'), 'w', encoding='utf8') as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: (label, id1, id2, s1, s2) = row.strip().split('\t') if ([id1, id2] in dev_ids): dev_fh.write(('%s\t%s\t%s\t%s\t%s\n' % (label, id1, id2, s1, s2))) else: train_fh.write(('%s\t%s\t%s\t%s\t%s\n' % (label, id1, id2, s1, s2))) with open(mrpc_test_file, encoding='utf8') as data_fh, open(os.path.join(mrpc_dir, 'test.tsv'), 'w', encoding='utf8') as test_fh: header = data_fh.readline() test_fh.write('index\t#1 ID\t#2 ID\t#1 String\t#2 String\n') for (idx, row) in enumerate(data_fh): (label, id1, id2, s1, s2) = row.strip().split('\t') test_fh.write(('%d\t%s\t%s\t%s\t%s\n' % (idx, id1, id2, s1, s2))) print('\tCompleted!')
def download_diagnostic(data_dir): print('Downloading and extracting diagnostic...') if (not os.path.isdir(os.path.join(data_dir, 'diagnostic'))): os.mkdir(os.path.join(data_dir, 'diagnostic')) data_file = os.path.join(data_dir, 'diagnostic', 'diagnostic.tsv') urllib.request.urlretrieve(TASK2PATH['diagnostic'], data_file) print('\tCompleted!') return
def get_tasks(task_names): task_names = task_names.split(',') if ('all' in task_names): tasks = TASKS else: tasks = [] for task_name in task_names: assert (task_name in TASKS), ('Task %s not found!' % task_name) tasks.append(task_name) return tasks
def main(arguments):
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', help='directory to save data to',
                        type=str, default='glue_data')
    parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
                        type=str, default='all')
    parser.add_argument('--path_to_mrpc',
                        help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt',
                        type=str, default='')
    args = parser.parse_args(arguments)
    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)
    tasks = get_tasks(args.tasks)
    for task in tasks:
        if task == 'MRPC':
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == 'diagnostic':
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)
def _is_hardcoded_xy(args): is_hardcoded_xy = (args.dataset in HARDCODED_JUST_XY) return is_hardcoded_xy
def get_just(args, pipe_config=None):
    # Decide what this stage loads from the dataloader: 'x' (inputs), 'y'
    # (labels), a list of dataset input names from the pipeline config, or None.
    is_hardcoded_xy = _is_hardcoded_xy(args)
    if is_hardcoded_xy:
        if pipe_config is None:
            warnings.warn('using hardcoded xy without pipe config (to be deprecated)')
            # Without a config, fall back to position: the first stage feeds
            # inputs, the last stage feeds labels.
            if args.stage == 0:
                just = 'x'
            elif args.stage == args.num_stages - 1:
                just = 'y'
            else:
                just = None
        else:
            my_depth = pipe_config.get_depth_for_stage(args.stage)
            if my_depth > 0 and pipe_config.get_dataset_inputs_for_stage(args.stage):
                just = 'x'
            elif my_depth == 0:
                just = 'y'
            else:
                just = None
    else:
        # Non-hardcoded datasets: take the stage's dataset inputs straight from
        # the pipeline configuration.
        inputs_from_dl = pipe_config.get_dataset_inputs_for_stage(args.stage)
        just = inputs_from_dl
        print(f'stage{args.stage}: inferred inputs from config: {just}')
    return just
def get_dataloader_keywords(args): dl_kw = dict() if args.cpu: dl_kw['pin_memory'] = False else: dl_kw['pin_memory'] = True dl_kw['num_workers'] = args.num_data_workers dl_kw['drop_last'] = True if getattr(args, 'dont_drop_last', False): dl_kw['drop_last'] = False return dl_kw
def get_data_dir(args): DATA_DIR = getattr(args, 'data_dir', DEFAULT_DATA_DIR) DATA_DIR = (DATA_DIR if DATA_DIR else DEFAULT_DATA_DIR) return DATA_DIR
def get_separate_dls_from_args(args, pipe_config: Optional[PipelineConfig]=None, dataset_keywords: Optional[Dict[(str, Any)]]=None, verbose=False, shuffle_train=True): if (dataset_keywords is None): dataset_keywords = dict() just = get_just(args, pipe_config=pipe_config) if ((not just) and (not getattr(args, 'load_extra_inputs', False))): return (None, None, [], None) data_dir = get_data_dir(args) dataloader_keywords = get_dataloader_keywords(args) assert ('shuffle' not in dataloader_keywords), str(dataloader_keywords) experiment_manual_seed = torch.initial_seed() try: handler = AVAILABLE_DATASETS[args.dataset](just=just, DATA_DIR=data_dir, args=args, **dataset_keywords) except KeyError as e: print('available datasets', AVAILABLE_DATASETS.keys()) raise e ds_train = handler.get_train_ds(just=just, DATA_DIR=data_dir, args=args, **dataset_keywords) ds_test = handler.get_test_ds(just=just, DATA_DIR=data_dir, args=args, **dataset_keywords) dataloader_keywords = handler.modify_dataloader_keywords(dataloader_keywords) extra = handler.get_modify_trainer_fn() train_sampler = MyNewDistributedSampler(experiment_manual_seed, ds_train, num_replicas=1, rank=0, shuffle=shuffle_train) test_sampler = (MyNewDistributedSampler(experiment_manual_seed, ds_test, num_replicas=1, rank=0, shuffle=False) if (ds_test is not None) else None) dl_train = DataLoader(ds_train, args.bs_train, shuffle=False, sampler=train_sampler, **dataloader_keywords) dl_test = (DataLoader(ds_test, args.bs_test, shuffle=False, sampler=test_sampler, **dataloader_keywords) if (ds_test is not None) else None) if verbose: n_samples_train = (len(dl_train) * args.bs_train) n_samples_test = ((len(dl_test) * args.bs_test) if (dl_test is not None) else 0) print(f'Train: {n_samples_train} samples') print(f'Test: {n_samples_test} samples') if extra: if isinstance(extra, list): assert (len(extra) == 1) extra = extra[0] return (dl_train, dl_test, list(filter(None, [train_sampler, test_sampler])), extra)
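# End-to-end sketch (all field values are assumptions): a hypothetical
# argparse-style namespace carrying the attributes that
# get_separate_dls_from_args() actually reads. This assumes 'cifar10' is
# present in AVAILABLE_DATASETS and was marked via
# register_hardcoded_just_xy_dataset, so stage 0 of a 2-stage pipeline loads
# only inputs ('x') even without a pipe_config.
def _example_separate_dls():
    from types import SimpleNamespace
    args = SimpleNamespace(dataset='cifar10', stage=0, num_stages=2,
                           bs_train=128, bs_test=256,
                           cpu=False, num_data_workers=4)
    dl_train, dl_test, samplers, extra = get_separate_dls_from_args(args)
    return dl_train, dl_test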
def add_dataset_argument(parser, default='cifar10', required=False): parser.add_argument('--dataset', default=default, choices=list(AVAILABLE_DATASETS.keys()), required=required)
class GlueLoss(torch.nn.Module): def __init__(self, num_labels): super().__init__() self.num_labels = num_labels if (self.num_labels == 1): self.loss = nn.MSELoss() else: self.loss = nn.CrossEntropyLoss() def forward(self, logits, labels): if (self.num_labels == 1): loss = self.loss(logits.view((- 1)), labels.view((- 1))) else: try: loss = self.loss(logits.view((- 1), self.num_labels), labels.view((- 1))) except Exception as e: print(self.num_labels, logits.shape, logits.view((- 1), self.num_labels).shape, labels.shape, labels.view((- 1)).shape) raise e return loss
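# Illustrative exercise of both GlueLoss regimes (shapes are assumptions):
# num_labels == 1 selects MSE for regression tasks such as STS-B; any other
# value selects cross-entropy for classification.
def _example_glue_loss():
    regression = GlueLoss(num_labels=1)
    classification = GlueLoss(num_labels=3)
    mse = regression(torch.randn(8, 1), torch.randn(8))
    ce = classification(torch.randn(8, 3), torch.randint(0, 3, (8,)))
    return mse, ce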