code
stringlengths
17
6.64M
def named_buffers(partition, recurse=True): params = nn.Module.named_buffers(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def named_parameters(partition, recurse=True): params = nn.Module.named_parameters(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def cpu(partition): partition.device = torch.device('cpu') return nn.Module.cpu(partition)
def cuda(partition, device=None): if (device is None): device = torch.cuda.current_device() partition.device = torch.device(device) return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs): device = None if ('device' in kwargs): device = kwargs['device'] elif ('tensor' in kwargs): device = kwargs['tensor'].device if args: if isinstance(args[0], (torch.device, int, str)): device = args[0] if torch.is_tensor(args[0]): device = args[0].device if (not (device is None)): partition.device = torch.device(device) return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=32): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (LayerNorm, Conv2d, GELU, Linear, Dropout, Identity), 'model_inputs': {'input0': {'shape': torch.Size([32, 3, 384, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([32, 1000]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'input0': {'shape': torch.Size([32, 3, 384, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[0]/Tensor::__add___73': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___78': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___80': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___82': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Tensor::__matmul___110': {'shape': torch.Size([32, 12, 577, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[0]/Tensor::__add___73': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___78': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___80': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Size::__getitem___82': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Tensor::__matmul___110': {'shape': torch.Size([32, 12, 577, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[3]/Tensor::__add___232': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Tensor::reshape_273': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[3]/Tensor::__add___232': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Tensor::reshape_273': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[6]/Tensor::__add___391': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Tensor::reshape_432': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[6]/Tensor::__add___391': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Tensor::reshape_432': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___497': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___497': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[9]/Tensor::__add___550': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[9]/Tensor::__add___550': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]': {'shape': torch.Size([32, 577, 768]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([32, 1000]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
class Partition0(nn.Module): LAYER_SCOPES = ['VisionTransformer/PatchEmbed[patch_embed]/Conv2d[proj]', 'VisionTransformer/Dropout[pos_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[attn_drop]'] TENSORS = ['VisionTransformer/Parameter[cls_token]', 'VisionTransformer/Parameter[pos_embed]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'patch_embed.proj', 'l_1': 'pos_drop', 'l_2': 'blocks.0.norm1', 'l_3': 'blocks.0.attn.qkv', 'l_4': 'blocks.0.attn.attn_drop', 'l_5': 'blocks.0.attn.proj', 'l_6': 'blocks.0.attn.proj_drop', 'l_7': 'blocks.0.drop_path', 'l_8': 'blocks.0.norm2', 'l_9': 'blocks.0.mlp.fc1', 'l_10': 'blocks.0.mlp.act', 'l_11': 'blocks.0.mlp.drop', 'l_12': 'blocks.0.mlp.fc2', 'l_13': 'blocks.0.mlp.drop', 'l_14': 'blocks.0.drop_path', 'l_15': 'blocks.1.norm1', 'l_16': 'blocks.1.attn.qkv', 'l_17': 'blocks.1.attn.attn_drop', 'p_0': 'cls_token', 'p_1': 'pos_embed'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = x0.shape t_0 = t_0[0] t_1 = self.l_0(x0) t_1 = t_1.flatten(2) t_1 = t_1.transpose(1, 2) t_0 = self.p_0.expand(t_0, (- 1), (- 1)) t_1 = (t_0, t_1) t_1 = torch.cat(t_1, dim=1) t_1 = (t_1 + self.p_1) t_1 = self.l_1(t_1) t_0 = self.l_2(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_3(t_0) t_5 = (t_2 // 12) t_5 = t_0.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_1 + t_2) t_1 = self.l_8(t_2) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = self.l_11(t_1) t_1 = self.l_12(t_1) t_1 = self.l_13(t_1) t_1 = self.l_14(t_1) t_1 = (t_2 + t_1) t_2 = self.l_15(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 12) t_6 = t_2.reshape(t_3, t_5, 3, 12, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_17(t_0) t_6 = (t_0 @ t_6) return list(flatten((t_1, t_3, t_5, t_4, t_6))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1] self.lookup = {'l_0': 'blocks.1.attn.proj', 'l_1': 'blocks.1.attn.proj_drop', 'l_2': 'blocks.1.drop_path', 'l_3': 'blocks.1.norm2', 'l_4': 'blocks.1.mlp.fc1', 'l_5': 'blocks.1.mlp.act', 'l_6': 'blocks.1.mlp.drop', 'l_7': 'blocks.1.mlp.fc2', 'l_8': 'blocks.1.mlp.drop', 'l_9': 'blocks.1.drop_path', 'l_10': 'blocks.2.norm1', 'l_11': 'blocks.2.attn.qkv', 'l_12': 'blocks.2.attn.attn_drop', 'l_13': 'blocks.2.attn.proj', 'l_14': 'blocks.2.attn.proj_drop', 'l_15': 'blocks.2.drop_path', 'l_16': 'blocks.2.norm2', 'l_17': 'blocks.2.mlp.fc1', 'l_18': 'blocks.2.mlp.act', 'l_19': 'blocks.2.mlp.drop', 'l_20': 'blocks.2.mlp.fc2'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = x4.transpose(1, 2) t_0 = t_0.reshape(x1, x2, x3) t_0 = self.l_0(t_0) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = (t_0 + t_1) t_0 = self.l_10(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_11(t_0) t_5 = (t_2 // 12) t_5 = t_0.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_12(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = self.l_15(t_2) t_2 = (t_1 + t_2) t_1 = self.l_16(t_2) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) return list(flatten((t_2, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[attn_drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.2.mlp.drop', 'l_1': 'blocks.2.drop_path', 'l_2': 'blocks.3.norm1', 'l_3': 'blocks.3.attn.qkv', 'l_4': 'blocks.3.attn.attn_drop', 'l_5': 'blocks.3.attn.proj', 'l_6': 'blocks.3.attn.proj_drop', 'l_7': 'blocks.3.drop_path', 'l_8': 'blocks.3.norm2', 'l_9': 'blocks.3.mlp.fc1', 'l_10': 'blocks.3.mlp.act', 'l_11': 'blocks.3.mlp.drop', 'l_12': 'blocks.3.mlp.fc2', 'l_13': 'blocks.3.mlp.drop', 'l_14': 'blocks.3.drop_path', 'l_15': 'blocks.4.norm1', 'l_16': 'blocks.4.attn.qkv', 'l_17': 'blocks.4.attn.attn_drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 12) t_5 = t_1.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 12) t_6 = t_2.reshape(t_3, t_5, 3, 12, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) return list(flatten((t_0, t_4))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.4.attn.proj', 'l_1': 'blocks.4.attn.proj_drop', 'l_2': 'blocks.4.drop_path', 'l_3': 'blocks.4.norm2', 'l_4': 'blocks.4.mlp.fc1', 'l_5': 'blocks.4.mlp.act', 'l_6': 'blocks.4.mlp.drop', 'l_7': 'blocks.4.mlp.fc2', 'l_8': 'blocks.4.mlp.drop', 'l_9': 'blocks.4.drop_path', 'l_10': 'blocks.5.norm1', 'l_11': 'blocks.5.attn.qkv', 'l_12': 'blocks.5.attn.attn_drop', 'l_13': 'blocks.5.attn.proj', 'l_14': 'blocks.5.attn.proj_drop', 'l_15': 'blocks.5.drop_path', 'l_16': 'blocks.5.norm2', 'l_17': 'blocks.5.mlp.fc1', 'l_18': 'blocks.5.mlp.act', 'l_19': 'blocks.5.mlp.drop', 'l_20': 'blocks.5.mlp.fc2'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = (t_0 + t_1) t_0 = self.l_10(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_11(t_0) t_5 = (t_2 // 12) t_5 = t_0.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_12(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = self.l_15(t_2) t_2 = (t_1 + t_2) t_1 = self.l_16(t_2) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) return list(flatten((t_2, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition4(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[attn_drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.5.mlp.drop', 'l_1': 'blocks.5.drop_path', 'l_2': 'blocks.6.norm1', 'l_3': 'blocks.6.attn.qkv', 'l_4': 'blocks.6.attn.attn_drop', 'l_5': 'blocks.6.attn.proj', 'l_6': 'blocks.6.attn.proj_drop', 'l_7': 'blocks.6.drop_path', 'l_8': 'blocks.6.norm2', 'l_9': 'blocks.6.mlp.fc1', 'l_10': 'blocks.6.mlp.act', 'l_11': 'blocks.6.mlp.drop', 'l_12': 'blocks.6.mlp.fc2', 'l_13': 'blocks.6.mlp.drop', 'l_14': 'blocks.6.drop_path', 'l_15': 'blocks.7.norm1', 'l_16': 'blocks.7.attn.qkv', 'l_17': 'blocks.7.attn.attn_drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 12) t_5 = t_1.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 12) t_6 = t_2.reshape(t_3, t_5, 3, 12, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) return list(flatten((t_0, t_4))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition5(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.7.attn.proj', 'l_1': 'blocks.7.attn.proj_drop', 'l_2': 'blocks.7.drop_path', 'l_3': 'blocks.7.norm2', 'l_4': 'blocks.7.mlp.fc1', 'l_5': 'blocks.7.mlp.act', 'l_6': 'blocks.7.mlp.drop', 'l_7': 'blocks.7.mlp.fc2', 'l_8': 'blocks.7.mlp.drop', 'l_9': 'blocks.7.drop_path', 'l_10': 'blocks.8.norm1', 'l_11': 'blocks.8.attn.qkv', 'l_12': 'blocks.8.attn.attn_drop', 'l_13': 'blocks.8.attn.proj', 'l_14': 'blocks.8.attn.proj_drop', 'l_15': 'blocks.8.drop_path', 'l_16': 'blocks.8.norm2', 'l_17': 'blocks.8.mlp.fc1', 'l_18': 'blocks.8.mlp.act', 'l_19': 'blocks.8.mlp.drop', 'l_20': 'blocks.8.mlp.fc2', 'l_21': 'blocks.8.mlp.drop', 'l_22': 'blocks.8.drop_path', 'l_23': 'blocks.9.norm1'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = (t_0 + t_1) t_0 = self.l_10(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_11(t_0) t_5 = (t_2 // 12) t_5 = t_0.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_12(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = self.l_15(t_2) t_2 = (t_1 + t_2) t_1 = self.l_16(t_2) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = (t_2 + t_1) t_2 = self.l_23(t_1) return list(flatten((t_1, t_2))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition6(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.9.attn.qkv', 'l_1': 'blocks.9.attn.attn_drop', 'l_2': 'blocks.9.attn.proj', 'l_3': 'blocks.9.attn.proj_drop', 'l_4': 'blocks.9.drop_path', 'l_5': 'blocks.9.norm2', 'l_6': 'blocks.9.mlp.fc1', 'l_7': 'blocks.9.mlp.act', 'l_8': 'blocks.9.mlp.drop', 'l_9': 'blocks.9.mlp.fc2', 'l_10': 'blocks.9.mlp.drop', 'l_11': 'blocks.9.drop_path', 'l_12': 'blocks.10.norm1', 'l_13': 'blocks.10.attn.qkv', 'l_14': 'blocks.10.attn.attn_drop', 'l_15': 'blocks.10.attn.proj'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = x1.shape t_1 = t_0[0] t_2 = t_0[1] t_0 = t_0[2] t_3 = self.l_0(x1) t_4 = (t_0 // 12) t_4 = t_3.reshape(t_1, t_2, 3, 12, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_3 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_1(t_5) t_4 = (t_5 @ t_4) t_4 = t_4.transpose(1, 2) t_0 = t_4.reshape(t_1, t_2, t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = self.l_4(t_0) t_0 = (x0 + t_0) t_2 = self.l_5(t_0) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = self.l_8(t_2) t_2 = self.l_9(t_2) t_2 = self.l_10(t_2) t_2 = self.l_11(t_2) t_2 = (t_0 + t_2) t_0 = self.l_12(t_2) t_1 = t_0.shape t_4 = t_1[0] t_5 = t_1[1] t_1 = t_1[2] t_0 = self.l_13(t_0) t_3 = (t_1 // 12) t_3 = t_0.reshape(t_4, t_5, 3, 12, t_3) t_3 = t_3.permute(2, 0, 3, 1, 4) t_0 = t_3[0] t_6 = t_3[1] t_3 = t_3[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_14(t_6) t_3 = (t_6 @ t_3) t_3 = t_3.transpose(1, 2) t_1 = t_3.reshape(t_4, t_5, t_1) t_1 = self.l_15(t_1) return list(flatten((t_2, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition7(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/LayerNorm[norm]', 'VisionTransformer/Linear[head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.10.attn.proj_drop', 'l_1': 'blocks.10.drop_path', 'l_2': 'blocks.10.norm2', 'l_3': 'blocks.10.mlp.fc1', 'l_4': 'blocks.10.mlp.act', 'l_5': 'blocks.10.mlp.drop', 'l_6': 'blocks.10.mlp.fc2', 'l_7': 'blocks.10.mlp.drop', 'l_8': 'blocks.10.drop_path', 'l_9': 'blocks.11.norm1', 'l_10': 'blocks.11.attn.qkv', 'l_11': 'blocks.11.attn.attn_drop', 'l_12': 'blocks.11.attn.proj', 'l_13': 'blocks.11.attn.proj_drop', 'l_14': 'blocks.11.drop_path', 'l_15': 'blocks.11.norm2', 'l_16': 'blocks.11.mlp.fc1', 'l_17': 'blocks.11.mlp.act', 'l_18': 'blocks.11.mlp.drop', 'l_19': 'blocks.11.mlp.fc2', 'l_20': 'blocks.11.mlp.drop', 'l_21': 'blocks.11.drop_path', 'l_22': 'norm', 'l_23': 'head'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = (t_0 + t_1) t_0 = self.l_9(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_10(t_0) t_5 = (t_2 // 12) t_5 = t_0.reshape(t_3, t_4, 3, 12, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_11(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_12(t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = (t_1 + t_2) t_1 = self.l_15(t_2) t_1 = self.l_16(t_1) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = (t_2 + t_1) t_1 = self.l_22(t_1) t_1 = t_1[(slice(None, None, None), 0)] t_1 = self.l_23(t_1) return (t_1,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]: '\n iterate over model layers yielding the layer,layer_scope,encasing_module\n Parameters:\n -----------\n model:\n the model to iterate over\n depth:\n how far down in the model tree to go\n basic_blocks:\n a list of modules that if encountered will not be broken down\n full:\n whether to yield only layers specified by the depth and basic_block options or to yield all layers\n ' if (prefix is None): prefix = type(module).__name__ for (name, sub_module) in module.named_children(): scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]') if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)): if full: (yield (sub_module, scope, module, True)) else: (yield (sub_module, scope, module)) else: if full: (yield (sub_module, scope, module, False)) (yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]: return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]: "\n iterate over model's buffers and parameters yielding obj,obj_scope\n\n Parameters:\n -----------\n model:\n the model to iterate over\n " if (prefix is None): prefix = type(module).__name__ for (param_name, param) in module.named_parameters(recurse=False): param_scope = f'{prefix}/{type(param).__name__}[{param_name}]' (yield (param, param_scope)) for (buffer_name, buffer) in module.named_buffers(recurse=False): buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]' (yield (buffer, buffer_scope)) for (name, sub_module) in module.named_children(): (yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]: return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
def move_tensors(ts, device): def move(t): if isinstance(t, (nn.Module, Tensor)): return t.to(device) return t return nested_map(move, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts): if isinstance(ts, torch.Size): (yield ts) elif isinstance(ts, (list, tuple, set)): (yield from chain(*[flatten(t) for t in ts])) elif isinstance(ts, dict): (yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))])) else: (yield ts)
def unflatten(xs, structure): return _unflatten(xs, structure)[0]
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs): state = nn.Module.state_dict(partition, *args, **kwargs) lookup = partition.lookup result = dict() for (k, v) in state.items(): if (k in lookup): result[lookup[k]] = v else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) result[new_k] = v return result
def load_state_dict(partition, state): reverse_lookup = {v: k for (k, v) in partition.lookup.items()} device = partition.device keys = list(partition.state_dict(None).keys()) new_state = dict() for k in keys: if (k in reverse_lookup): new_state[reverse_lookup[k]] = state[k].to(device) continue idx = k.rfind('.') to_replace = k[:idx] if (to_replace in reverse_lookup): key = (reverse_lookup[to_replace] + k[idx:]) new_state[key] = state[k].to(device) nn.Module.load_state_dict(partition, new_state, strict=True)
def named_buffers(partition, recurse=True): params = nn.Module.named_buffers(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def named_parameters(partition, recurse=True): params = nn.Module.named_parameters(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def cpu(partition): partition.device = torch.device('cpu') return nn.Module.cpu(partition)
def cuda(partition, device=None): if (device is None): device = torch.cuda.current_device() partition.device = torch.device(device) return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs): device = None if ('device' in kwargs): device = kwargs['device'] elif ('tensor' in kwargs): device = kwargs['tensor'].device if args: if isinstance(args[0], (torch.device, int, str)): device = args[0] if torch.is_tensor(args[0]): device = args[0].device if (not (device is None)): partition.device = torch.device(device) return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=128): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Linear, Conv2d, LayerNorm, Dropout, Identity, GELU), 'model_inputs': {'input0': {'shape': torch.Size([128, 3, 384, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([128, 100]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'input0': {'shape': torch.Size([128, 3, 384, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[14]/Tensor::__add___807': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[14]/Tensor::__add___807': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[16]/Tensor::__add___921': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___926': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___928': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___930': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Tensor::__matmul___958': {'shape': torch.Size([128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[16]/Tensor::__add___921': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___926': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___928': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___930': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Tensor::__matmul___958': {'shape': torch.Size([128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[19]/Tensor::__add___1080': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1085': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1087': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1089': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Tensor::permute_1101': {'shape': torch.Size([3, 128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[19]/Tensor::__add___1080': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1085': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1087': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Size::__getitem___1089': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Tensor::permute_1101': {'shape': torch.Size([3, 128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'created_by': 6}}, 'outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([128, 100]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
class Partition0(nn.Module): LAYER_SCOPES = ['VisionTransformer/PatchEmbed[patch_embed]/Conv2d[proj]', 'VisionTransformer/Dropout[pos_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]'] TENSORS = ['VisionTransformer/Parameter[cls_token]', 'VisionTransformer/Parameter[pos_embed]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'patch_embed.proj', 'l_1': 'pos_drop', 'l_2': 'blocks.0.norm1', 'l_3': 'blocks.0.attn.qkv', 'l_4': 'blocks.0.attn.attn_drop', 'l_5': 'blocks.0.attn.proj', 'l_6': 'blocks.0.attn.proj_drop', 'l_7': 'blocks.0.drop_path', 'l_8': 'blocks.0.norm2', 'l_9': 'blocks.0.mlp.fc1', 'l_10': 'blocks.0.mlp.act', 'l_11': 'blocks.0.mlp.drop', 'l_12': 'blocks.0.mlp.fc2', 'l_13': 'blocks.0.mlp.drop', 'l_14': 'blocks.0.drop_path', 'l_15': 'blocks.1.norm1', 'l_16': 'blocks.1.attn.qkv', 'l_17': 'blocks.1.attn.attn_drop', 'l_18': 'blocks.1.attn.proj', 'l_19': 'blocks.1.attn.proj_drop', 'l_20': 'blocks.1.drop_path', 'l_21': 'blocks.1.norm2', 'l_22': 'blocks.1.mlp.fc1', 'l_23': 'blocks.1.mlp.act', 'l_24': 'blocks.1.mlp.drop', 'l_25': 'blocks.1.mlp.fc2', 'l_26': 'blocks.1.mlp.drop', 'l_27': 'blocks.1.drop_path', 'l_28': 'blocks.2.norm1', 'l_29': 'blocks.2.attn.qkv', 'l_30': 'blocks.2.attn.attn_drop', 'l_31': 'blocks.2.attn.proj', 'l_32': 'blocks.2.attn.proj_drop', 'l_33': 'blocks.2.drop_path', 'l_34': 'blocks.2.norm2', 'l_35': 'blocks.2.mlp.fc1', 'l_36': 'blocks.2.mlp.act', 'l_37': 'blocks.2.mlp.drop', 'l_38': 'blocks.2.mlp.fc2', 'p_0': 'cls_token', 'p_1': 'pos_embed'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = x0.shape t_0 = t_0[0] t_1 = self.l_0(x0) t_1 = t_1.flatten(2) t_1 = t_1.transpose(1, 2) t_0 = self.p_0.expand(t_0, (- 1), (- 1)) t_1 = (t_0, t_1) t_1 = torch.cat(t_1, dim=1) t_1 = (t_1 + self.p_1) t_1 = self.l_1(t_1) t_0 = self.l_2(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_3(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_1 + t_2) t_1 = self.l_8(t_2) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = self.l_11(t_1) t_1 = self.l_12(t_1) t_1 = self.l_13(t_1) t_1 = self.l_14(t_1) t_1 = (t_2 + t_1) t_2 = self.l_15(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_17(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_1 + t_4) t_1 = self.l_21(t_4) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = self.l_24(t_1) t_1 = self.l_25(t_1) t_1 = self.l_26(t_1) t_1 = self.l_27(t_1) t_1 = (t_4 + t_1) t_4 = self.l_28(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_0 = (t_5 // 16) t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_4 = t_0[0] t_2 = t_0[1] t_0 = t_0[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_0 = (t_2 @ t_0) t_0 = t_0.transpose(1, 2) t_5 = t_0.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_1 + t_5) t_1 = self.l_34(t_5) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = self.l_37(t_1) t_1 = self.l_38(t_1) return list(flatten((t_5, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.2.mlp.drop', 'l_1': 'blocks.2.drop_path', 'l_2': 'blocks.3.norm1', 'l_3': 'blocks.3.attn.qkv', 'l_4': 'blocks.3.attn.attn_drop', 'l_5': 'blocks.3.attn.proj', 'l_6': 'blocks.3.attn.proj_drop', 'l_7': 'blocks.3.drop_path', 'l_8': 'blocks.3.norm2', 'l_9': 'blocks.3.mlp.fc1', 'l_10': 'blocks.3.mlp.act', 'l_11': 'blocks.3.mlp.drop', 'l_12': 'blocks.3.mlp.fc2', 'l_13': 'blocks.3.mlp.drop', 'l_14': 'blocks.3.drop_path', 'l_15': 'blocks.4.norm1', 'l_16': 'blocks.4.attn.qkv', 'l_17': 'blocks.4.attn.attn_drop', 'l_18': 'blocks.4.attn.proj', 'l_19': 'blocks.4.attn.proj_drop', 'l_20': 'blocks.4.drop_path', 'l_21': 'blocks.4.norm2', 'l_22': 'blocks.4.mlp.fc1', 'l_23': 'blocks.4.mlp.act', 'l_24': 'blocks.4.mlp.drop', 'l_25': 'blocks.4.mlp.fc2', 'l_26': 'blocks.4.mlp.drop', 'l_27': 'blocks.4.drop_path', 'l_28': 'blocks.5.norm1', 'l_29': 'blocks.5.attn.qkv', 'l_30': 'blocks.5.attn.attn_drop', 'l_31': 'blocks.5.attn.proj', 'l_32': 'blocks.5.attn.proj_drop', 'l_33': 'blocks.5.drop_path', 'l_34': 'blocks.5.norm2', 'l_35': 'blocks.5.mlp.fc1', 'l_36': 'blocks.5.mlp.act', 'l_37': 'blocks.5.mlp.drop', 'l_38': 'blocks.5.mlp.fc2'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_0 + t_4) t_0 = self.l_21(t_4) t_0 = self.l_22(t_0) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = (t_4 + t_0) t_4 = self.l_28(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_0 + t_5) t_0 = self.l_34(t_5) t_0 = self.l_35(t_0) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) t_0 = self.l_38(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.5.mlp.drop', 'l_1': 'blocks.5.drop_path', 'l_2': 'blocks.6.norm1', 'l_3': 'blocks.6.attn.qkv', 'l_4': 'blocks.6.attn.attn_drop', 'l_5': 'blocks.6.attn.proj', 'l_6': 'blocks.6.attn.proj_drop', 'l_7': 'blocks.6.drop_path', 'l_8': 'blocks.6.norm2', 'l_9': 'blocks.6.mlp.fc1', 'l_10': 'blocks.6.mlp.act', 'l_11': 'blocks.6.mlp.drop', 'l_12': 'blocks.6.mlp.fc2', 'l_13': 'blocks.6.mlp.drop', 'l_14': 'blocks.6.drop_path', 'l_15': 'blocks.7.norm1', 'l_16': 'blocks.7.attn.qkv', 'l_17': 'blocks.7.attn.attn_drop', 'l_18': 'blocks.7.attn.proj', 'l_19': 'blocks.7.attn.proj_drop', 'l_20': 'blocks.7.drop_path', 'l_21': 'blocks.7.norm2', 'l_22': 'blocks.7.mlp.fc1', 'l_23': 'blocks.7.mlp.act', 'l_24': 'blocks.7.mlp.drop', 'l_25': 'blocks.7.mlp.fc2', 'l_26': 'blocks.7.mlp.drop', 'l_27': 'blocks.7.drop_path', 'l_28': 'blocks.8.norm1', 'l_29': 'blocks.8.attn.qkv', 'l_30': 'blocks.8.attn.attn_drop', 'l_31': 'blocks.8.attn.proj', 'l_32': 'blocks.8.attn.proj_drop', 'l_33': 'blocks.8.drop_path', 'l_34': 'blocks.8.norm2', 'l_35': 'blocks.8.mlp.fc1', 'l_36': 'blocks.8.mlp.act', 'l_37': 'blocks.8.mlp.drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_0 + t_4) t_0 = self.l_21(t_4) t_0 = self.l_22(t_0) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = (t_4 + t_0) t_4 = self.l_28(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_0 + t_5) t_0 = self.l_34(t_5) t_0 = self.l_35(t_0) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.8.mlp.fc2', 'l_1': 'blocks.8.mlp.drop', 'l_2': 'blocks.8.drop_path', 'l_3': 'blocks.9.norm1', 'l_4': 'blocks.9.attn.qkv', 'l_5': 'blocks.9.attn.attn_drop', 'l_6': 'blocks.9.attn.proj', 'l_7': 'blocks.9.attn.proj_drop', 'l_8': 'blocks.9.drop_path', 'l_9': 'blocks.9.norm2', 'l_10': 'blocks.9.mlp.fc1', 'l_11': 'blocks.9.mlp.act', 'l_12': 'blocks.9.mlp.drop', 'l_13': 'blocks.9.mlp.fc2', 'l_14': 'blocks.9.mlp.drop', 'l_15': 'blocks.9.drop_path', 'l_16': 'blocks.10.norm1', 'l_17': 'blocks.10.attn.qkv', 'l_18': 'blocks.10.attn.attn_drop', 'l_19': 'blocks.10.attn.proj', 'l_20': 'blocks.10.attn.proj_drop', 'l_21': 'blocks.10.drop_path', 'l_22': 'blocks.10.norm2', 'l_23': 'blocks.10.mlp.fc1', 'l_24': 'blocks.10.mlp.act', 'l_25': 'blocks.10.mlp.drop', 'l_26': 'blocks.10.mlp.fc2', 'l_27': 'blocks.10.mlp.drop', 'l_28': 'blocks.10.drop_path', 'l_29': 'blocks.11.norm1', 'l_30': 'blocks.11.attn.qkv', 'l_31': 'blocks.11.attn.attn_drop', 'l_32': 'blocks.11.attn.proj', 'l_33': 'blocks.11.attn.proj_drop', 'l_34': 'blocks.11.drop_path', 'l_35': 'blocks.11.norm2', 'l_36': 'blocks.11.mlp.fc1'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_4(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_5(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = self.l_8(t_2) t_2 = (t_0 + t_2) t_0 = self.l_9(t_2) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = (t_2 + t_0) t_2 = self.l_16(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_17(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_18(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = self.l_21(t_4) t_4 = (t_0 + t_4) t_0 = self.l_22(t_4) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = (t_4 + t_0) t_4 = self.l_29(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_30(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_31(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = self.l_34(t_5) t_5 = (t_0 + t_5) t_0 = self.l_35(t_5) t_0 = self.l_36(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition4(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.11.mlp.act', 'l_1': 'blocks.11.mlp.drop', 'l_2': 'blocks.11.mlp.fc2', 'l_3': 'blocks.11.mlp.drop', 'l_4': 'blocks.11.drop_path', 'l_5': 'blocks.12.norm1', 'l_6': 'blocks.12.attn.qkv', 'l_7': 'blocks.12.attn.attn_drop', 'l_8': 'blocks.12.attn.proj', 'l_9': 'blocks.12.attn.proj_drop', 'l_10': 'blocks.12.drop_path', 'l_11': 'blocks.12.norm2', 'l_12': 'blocks.12.mlp.fc1', 'l_13': 'blocks.12.mlp.act', 'l_14': 'blocks.12.mlp.drop', 'l_15': 'blocks.12.mlp.fc2', 'l_16': 'blocks.12.mlp.drop', 'l_17': 'blocks.12.drop_path', 'l_18': 'blocks.13.norm1', 'l_19': 'blocks.13.attn.qkv', 'l_20': 'blocks.13.attn.attn_drop', 'l_21': 'blocks.13.attn.proj', 'l_22': 'blocks.13.attn.proj_drop', 'l_23': 'blocks.13.drop_path', 'l_24': 'blocks.13.norm2', 'l_25': 'blocks.13.mlp.fc1', 'l_26': 'blocks.13.mlp.act', 'l_27': 'blocks.13.mlp.drop', 'l_28': 'blocks.13.mlp.fc2', 'l_29': 'blocks.13.mlp.drop', 'l_30': 'blocks.13.drop_path', 'l_31': 'blocks.14.norm1', 'l_32': 'blocks.14.attn.qkv', 'l_33': 'blocks.14.attn.attn_drop', 'l_34': 'blocks.14.attn.proj', 'l_35': 'blocks.14.attn.proj_drop', 'l_36': 'blocks.14.drop_path', 'l_37': 'blocks.14.norm2'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = self.l_4(t_0) t_0 = (x0 + t_0) t_1 = self.l_5(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_6(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_7(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_8(t_2) t_2 = self.l_9(t_2) t_2 = self.l_10(t_2) t_2 = (t_0 + t_2) t_0 = self.l_11(t_2) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = self.l_17(t_0) t_0 = (t_2 + t_0) t_2 = self.l_18(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_19(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_20(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_21(t_4) t_4 = self.l_22(t_4) t_4 = self.l_23(t_4) t_4 = (t_0 + t_4) t_0 = self.l_24(t_4) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = self.l_29(t_0) t_0 = self.l_30(t_0) t_0 = (t_4 + t_0) t_4 = self.l_31(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_32(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_33(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_34(t_5) t_5 = self.l_35(t_5) t_5 = self.l_36(t_5) t_5 = (t_0 + t_5) t_0 = self.l_37(t_5) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition5(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[attn_drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.14.mlp.fc1', 'l_1': 'blocks.14.mlp.act', 'l_2': 'blocks.14.mlp.drop', 'l_3': 'blocks.14.mlp.fc2', 'l_4': 'blocks.14.mlp.drop', 'l_5': 'blocks.14.drop_path', 'l_6': 'blocks.15.norm1', 'l_7': 'blocks.15.attn.qkv', 'l_8': 'blocks.15.attn.attn_drop', 'l_9': 'blocks.15.attn.proj', 'l_10': 'blocks.15.attn.proj_drop', 'l_11': 'blocks.15.drop_path', 'l_12': 'blocks.15.norm2', 'l_13': 'blocks.15.mlp.fc1', 'l_14': 'blocks.15.mlp.act', 'l_15': 'blocks.15.mlp.drop', 'l_16': 'blocks.15.mlp.fc2', 'l_17': 'blocks.15.mlp.drop', 'l_18': 'blocks.15.drop_path', 'l_19': 'blocks.16.norm1', 'l_20': 'blocks.16.attn.qkv', 'l_21': 'blocks.16.attn.attn_drop', 'l_22': 'blocks.16.attn.proj', 'l_23': 'blocks.16.attn.proj_drop', 'l_24': 'blocks.16.drop_path', 'l_25': 'blocks.16.norm2', 'l_26': 'blocks.16.mlp.fc1', 'l_27': 'blocks.16.mlp.act', 'l_28': 'blocks.16.mlp.drop', 'l_29': 'blocks.16.mlp.fc2', 'l_30': 'blocks.16.mlp.drop', 'l_31': 'blocks.16.drop_path', 'l_32': 'blocks.17.norm1', 'l_33': 'blocks.17.attn.qkv', 'l_34': 'blocks.17.attn.attn_drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = self.l_4(t_0) t_0 = self.l_5(t_0) t_0 = (x0 + t_0) t_1 = self.l_6(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_7(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_8(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_9(t_2) t_2 = self.l_10(t_2) t_2 = self.l_11(t_2) t_2 = (t_0 + t_2) t_0 = self.l_12(t_2) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = self.l_17(t_0) t_0 = self.l_18(t_0) t_0 = (t_2 + t_0) t_2 = self.l_19(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_20(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_21(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_22(t_4) t_4 = self.l_23(t_4) t_4 = self.l_24(t_4) t_4 = (t_0 + t_4) t_0 = self.l_25(t_4) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = self.l_29(t_0) t_0 = self.l_30(t_0) t_0 = self.l_31(t_0) t_0 = (t_4 + t_0) t_4 = self.l_32(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_33(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_34(t_2) t_1 = (t_2 @ t_1) return list(flatten((t_0, t_3, t_6, t_5, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition6(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1] self.lookup = {'l_0': 'blocks.17.attn.proj', 'l_1': 'blocks.17.attn.proj_drop', 'l_2': 'blocks.17.drop_path', 'l_3': 'blocks.17.norm2', 'l_4': 'blocks.17.mlp.fc1', 'l_5': 'blocks.17.mlp.act', 'l_6': 'blocks.17.mlp.drop', 'l_7': 'blocks.17.mlp.fc2', 'l_8': 'blocks.17.mlp.drop', 'l_9': 'blocks.17.drop_path', 'l_10': 'blocks.18.norm1', 'l_11': 'blocks.18.attn.qkv', 'l_12': 'blocks.18.attn.attn_drop', 'l_13': 'blocks.18.attn.proj', 'l_14': 'blocks.18.attn.proj_drop', 'l_15': 'blocks.18.drop_path', 'l_16': 'blocks.18.norm2', 'l_17': 'blocks.18.mlp.fc1', 'l_18': 'blocks.18.mlp.act', 'l_19': 'blocks.18.mlp.drop', 'l_20': 'blocks.18.mlp.fc2', 'l_21': 'blocks.18.mlp.drop', 'l_22': 'blocks.18.drop_path', 'l_23': 'blocks.19.norm1', 'l_24': 'blocks.19.attn.qkv', 'l_25': 'blocks.19.attn.attn_drop', 'l_26': 'blocks.19.attn.proj', 'l_27': 'blocks.19.attn.proj_drop', 'l_28': 'blocks.19.drop_path', 'l_29': 'blocks.19.norm2', 'l_30': 'blocks.19.mlp.fc1', 'l_31': 'blocks.19.mlp.act', 'l_32': 'blocks.19.mlp.drop', 'l_33': 'blocks.19.mlp.fc2', 'l_34': 'blocks.19.mlp.drop', 'l_35': 'blocks.19.drop_path', 'l_36': 'blocks.20.norm1', 'l_37': 'blocks.20.attn.qkv'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = x4.transpose(1, 2) t_0 = t_0.reshape(x1, x2, x3) t_0 = self.l_0(t_0) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = (t_0 + t_1) t_0 = self.l_10(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_11(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_12(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = self.l_15(t_2) t_2 = (t_1 + t_2) t_1 = self.l_16(t_2) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = (t_2 + t_1) t_2 = self.l_23(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_24(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_25(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_26(t_4) t_4 = self.l_27(t_4) t_4 = self.l_28(t_4) t_4 = (t_1 + t_4) t_1 = self.l_29(t_4) t_1 = self.l_30(t_1) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = (t_4 + t_1) t_4 = self.l_36(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_37(t_4) t_0 = (t_5 // 16) t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) return list(flatten((t_1, t_3, t_6, t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition7(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/LayerNorm[norm]', 'VisionTransformer/Linear[head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1] self.lookup = {'l_0': 'blocks.20.attn.attn_drop', 'l_1': 'blocks.20.attn.proj', 'l_2': 'blocks.20.attn.proj_drop', 'l_3': 'blocks.20.drop_path', 'l_4': 'blocks.20.norm2', 'l_5': 'blocks.20.mlp.fc1', 'l_6': 'blocks.20.mlp.act', 'l_7': 'blocks.20.mlp.drop', 'l_8': 'blocks.20.mlp.fc2', 'l_9': 'blocks.20.mlp.drop', 'l_10': 'blocks.20.drop_path', 'l_11': 'blocks.21.norm1', 'l_12': 'blocks.21.attn.qkv', 'l_13': 'blocks.21.attn.attn_drop', 'l_14': 'blocks.21.attn.proj', 'l_15': 'blocks.21.attn.proj_drop', 'l_16': 'blocks.21.drop_path', 'l_17': 'blocks.21.norm2', 'l_18': 'blocks.21.mlp.fc1', 'l_19': 'blocks.21.mlp.act', 'l_20': 'blocks.21.mlp.drop', 'l_21': 'blocks.21.mlp.fc2', 'l_22': 'blocks.21.mlp.drop', 'l_23': 'blocks.21.drop_path', 'l_24': 'blocks.22.norm1', 'l_25': 'blocks.22.attn.qkv', 'l_26': 'blocks.22.attn.attn_drop', 'l_27': 'blocks.22.attn.proj', 'l_28': 'blocks.22.attn.proj_drop', 'l_29': 'blocks.22.drop_path', 'l_30': 'blocks.22.norm2', 'l_31': 'blocks.22.mlp.fc1', 'l_32': 'blocks.22.mlp.act', 'l_33': 'blocks.22.mlp.drop', 'l_34': 'blocks.22.mlp.fc2', 'l_35': 'blocks.22.mlp.drop', 'l_36': 'blocks.22.drop_path', 'l_37': 'blocks.23.norm1', 'l_38': 'blocks.23.attn.qkv', 'l_39': 'blocks.23.attn.attn_drop', 'l_40': 'blocks.23.attn.proj', 'l_41': 'blocks.23.attn.proj_drop', 'l_42': 'blocks.23.drop_path', 'l_43': 'blocks.23.norm2', 'l_44': 'blocks.23.mlp.fc1', 'l_45': 'blocks.23.mlp.act', 'l_46': 'blocks.23.mlp.drop', 'l_47': 'blocks.23.mlp.fc2', 'l_48': 'blocks.23.mlp.drop', 'l_49': 'blocks.23.drop_path', 'l_50': 'norm', 'l_51': 'head'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = x4[0] t_1 = x4[1] t_2 = x4[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_0 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_0(t_1) t_2 = (t_1 @ t_2) t_2 = t_2.transpose(1, 2) t_2 = t_2.reshape(x1, x2, x3) t_2 = self.l_1(t_2) t_2 = self.l_2(t_2) t_2 = self.l_3(t_2) t_2 = (x0 + t_2) t_1 = self.l_4(t_2) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = (t_2 + t_1) t_2 = self.l_11(t_1) t_0 = t_2.shape t_3 = t_0[0] t_4 = t_0[1] t_0 = t_0[2] t_2 = self.l_12(t_2) t_5 = (t_0 // 16) t_5 = t_2.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_2 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_2 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_13(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_0 = t_5.reshape(t_3, t_4, t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_1 = self.l_17(t_0) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = (t_0 + t_1) t_0 = self.l_24(t_1) t_4 = t_0.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_0 = self.l_25(t_0) t_6 = (t_4 // 16) t_6 = t_0.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_0 = t_6[0] t_2 = t_6[1] t_6 = t_6[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_0 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_26(t_2) t_6 = (t_2 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_27(t_4) t_4 = self.l_28(t_4) t_4 = self.l_29(t_4) t_4 = (t_1 + t_4) t_1 = self.l_30(t_4) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = (t_4 + t_1) t_4 = self.l_37(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_38(t_4) t_2 = (t_5 // 16) t_2 = t_4.reshape(t_3, t_6, 3, 16, t_2) t_2 = t_2.permute(2, 0, 3, 1, 4) t_4 = t_2[0] t_0 = t_2[1] t_2 = t_2[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_4 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_39(t_0) t_2 = (t_0 @ t_2) t_2 = t_2.transpose(1, 2) t_5 = t_2.reshape(t_3, t_6, t_5) t_5 = self.l_40(t_5) t_5 = self.l_41(t_5) t_5 = self.l_42(t_5) t_5 = (t_1 + t_5) t_1 = self.l_43(t_5) t_1 = self.l_44(t_1) t_1 = self.l_45(t_1) t_1 = self.l_46(t_1) t_1 = self.l_47(t_1) t_1 = self.l_48(t_1) t_1 = self.l_49(t_1) t_1 = (t_5 + t_1) t_1 = self.l_50(t_1) t_1 = t_1[(slice(None, None, None), 0)] t_1 = self.l_51(t_1) return (t_1,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[nn.Module]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module)]]: '\n iterate over model layers yielding the layer,layer_scope,encasing_module\n Parameters:\n -----------\n model:\n the model to iterate over\n depth:\n how far down in the model tree to go\n basic_blocks:\n a list of modules that if encountered will not be broken down\n full:\n whether to yield only layers specified by the depth and basick_block options or to yield all layers\n ' if (prefix is None): prefix = type(module).__name__ for (name, sub_module) in module.named_children(): scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]') if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)): if full: (yield (sub_module, scope, module, True)) else: (yield (sub_module, scope, module)) else: if full: (yield (sub_module, scope, module, False)) (yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]: return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]: "\n iterate over model's buffers and parameters yielding obj,obj_scope\n\n Parameters:\n -----------\n model:\n the model to iterate over\n " if (prefix is None): prefix = type(module).__name__ for (param_name, param) in module.named_parameters(recurse=False): param_scope = f'{prefix}/{type(param).__name__}[{param_name}]' (yield (param, param_scope)) for (buffer_name, buffer) in module.named_buffers(recurse=False): buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]' (yield (buffer, buffer_scope)) for (name, sub_module) in module.named_children(): (yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]: return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
def move_tensors(ts, device): def move(t): if isinstance(t, (nn.Module, Tensor)): return t.to(device) return t return nested_map(move, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts): if isinstance(ts, torch.Size): (yield ts) elif isinstance(ts, (list, tuple, set)): (yield from chain(*[flatten(t) for t in ts])) elif isinstance(ts, dict): (yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))])) else: (yield ts)
def unflatten(xs, structure): return _unflatten(xs, structure)[0]
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs): state = nn.Module.state_dict(partition, *args, **kwargs) lookup = partition.lookup result = dict() for (k, v) in state.items(): if (k in lookup): result[lookup[k]] = v else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) result[new_k] = v return result
def load_state_dict(partition, state): reverse_lookup = {v: k for (k, v) in partition.lookup.items()} device = partition.device keys = list(partition.state_dict(None).keys()) new_state = dict() for k in keys: if (k in reverse_lookup): new_state[reverse_lookup[k]] = state[k].to(device) continue idx = k.rfind('.') to_replace = k[:idx] if (to_replace in reverse_lookup): key = (reverse_lookup[to_replace] + k[idx:]) new_state[key] = state[k].to(device) nn.Module.load_state_dict(partition, new_state, strict=True)
def named_buffers(partition, recurse=True): params = nn.Module.named_buffers(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def named_parameters(partition, recurse=True): params = nn.Module.named_parameters(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def cpu(partition): partition.device = torch.device('cpu') return nn.Module.cpu(partition)
def cuda(partition, device=None): if (device is None): device = torch.cuda.current_device() partition.device = torch.device(device) return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs): device = None if ('device' in kwargs): device = kwargs['device'] elif ('tensor' in kwargs): device = kwargs['tensor'].device if args: if isinstance(args[0], (torch.device, int, str)): device = args[0] if torch.is_tensor(args[0]): device = args[0].device if (not (device is None)): partition.device = torch.device(device) return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=128): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Linear, Conv2d, LayerNorm, Dropout, Identity, GELU), 'model_inputs': {'input0': {'shape': torch.Size([128, 3, 384, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([128, 1000]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'input0': {'shape': torch.Size([128, 3, 384, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___330': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]': {'shape': torch.Size([128, 145, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[13]/Tensor::__add___762': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[proj]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[13]/Tensor::__add___762': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[proj]': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[16]/Tensor::__add___921': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___926': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___928': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___930': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Tensor::__matmul___958': {'shape': torch.Size([128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[16]/Tensor::__add___921': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___926': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___928': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Size::__getitem___930': {'shape': None, 'dtype': int, 'req_grad': False, 'is_batched': False, 'created_by': 5}, 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Tensor::__matmul___958': {'shape': torch.Size([128, 16, 145, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[19]/Tensor::__add___1080': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Tensor::__getattribute___1083': {'shape': torch.Size([3]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]': {'shape': torch.Size([128, 145, 3072]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[19]/Tensor::__add___1080': {'shape': torch.Size([128, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Tensor::__getattribute___1083': {'shape': torch.Size([3]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]': {'shape': torch.Size([128, 145, 3072]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([128, 1000]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
class Partition0(nn.Module): LAYER_SCOPES = ['VisionTransformer/PatchEmbed[patch_embed]/Conv2d[proj]', 'VisionTransformer/Dropout[pos_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]'] TENSORS = ['VisionTransformer/Parameter[cls_token]', 'VisionTransformer/Parameter[pos_embed]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'patch_embed.proj', 'l_1': 'pos_drop', 'l_2': 'blocks.0.norm1', 'l_3': 'blocks.0.attn.qkv', 'l_4': 'blocks.0.attn.attn_drop', 'l_5': 'blocks.0.attn.proj', 'l_6': 'blocks.0.attn.proj_drop', 'l_7': 'blocks.0.drop_path', 'l_8': 'blocks.0.norm2', 'l_9': 'blocks.0.mlp.fc1', 'l_10': 'blocks.0.mlp.act', 'l_11': 'blocks.0.mlp.drop', 'l_12': 'blocks.0.mlp.fc2', 'l_13': 'blocks.0.mlp.drop', 'l_14': 'blocks.0.drop_path', 'l_15': 'blocks.1.norm1', 'l_16': 'blocks.1.attn.qkv', 'l_17': 'blocks.1.attn.attn_drop', 'l_18': 'blocks.1.attn.proj', 'l_19': 'blocks.1.attn.proj_drop', 'l_20': 'blocks.1.drop_path', 'l_21': 'blocks.1.norm2', 'l_22': 'blocks.1.mlp.fc1', 'l_23': 'blocks.1.mlp.act', 'l_24': 'blocks.1.mlp.drop', 'l_25': 'blocks.1.mlp.fc2', 'l_26': 'blocks.1.mlp.drop', 'l_27': 'blocks.1.drop_path', 'l_28': 'blocks.2.norm1', 'l_29': 'blocks.2.attn.qkv', 'l_30': 'blocks.2.attn.attn_drop', 'l_31': 'blocks.2.attn.proj', 'l_32': 'blocks.2.attn.proj_drop', 'l_33': 'blocks.2.drop_path', 'l_34': 'blocks.2.norm2', 'l_35': 'blocks.2.mlp.fc1', 'l_36': 'blocks.2.mlp.act', 'l_37': 'blocks.2.mlp.drop', 'p_0': 'cls_token', 'p_1': 'pos_embed'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = x0.shape t_0 = t_0[0] t_1 = self.l_0(x0) t_1 = t_1.flatten(2) t_1 = t_1.transpose(1, 2) t_0 = self.p_0.expand(t_0, (- 1), (- 1)) t_1 = (t_0, t_1) t_1 = torch.cat(t_1, dim=1) t_1 = (t_1 + self.p_1) t_1 = self.l_1(t_1) t_0 = self.l_2(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_3(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_1 + t_2) t_1 = self.l_8(t_2) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = self.l_11(t_1) t_1 = self.l_12(t_1) t_1 = self.l_13(t_1) t_1 = self.l_14(t_1) t_1 = (t_2 + t_1) t_2 = self.l_15(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_17(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_1 + t_4) t_1 = self.l_21(t_4) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = self.l_24(t_1) t_1 = self.l_25(t_1) t_1 = self.l_26(t_1) t_1 = self.l_27(t_1) t_1 = (t_4 + t_1) t_4 = self.l_28(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_0 = (t_5 // 16) t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_4 = t_0[0] t_2 = t_0[1] t_0 = t_0[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_0 = (t_2 @ t_0) t_0 = t_0.transpose(1, 2) t_5 = t_0.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_1 + t_5) t_1 = self.l_34(t_5) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = self.l_37(t_1) return list(flatten((t_5, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.2.mlp.fc2', 'l_1': 'blocks.2.mlp.drop', 'l_2': 'blocks.2.drop_path', 'l_3': 'blocks.3.norm1', 'l_4': 'blocks.3.attn.qkv', 'l_5': 'blocks.3.attn.attn_drop', 'l_6': 'blocks.3.attn.proj', 'l_7': 'blocks.3.attn.proj_drop', 'l_8': 'blocks.3.drop_path', 'l_9': 'blocks.3.norm2', 'l_10': 'blocks.3.mlp.fc1', 'l_11': 'blocks.3.mlp.act', 'l_12': 'blocks.3.mlp.drop', 'l_13': 'blocks.3.mlp.fc2', 'l_14': 'blocks.3.mlp.drop', 'l_15': 'blocks.3.drop_path', 'l_16': 'blocks.4.norm1', 'l_17': 'blocks.4.attn.qkv', 'l_18': 'blocks.4.attn.attn_drop', 'l_19': 'blocks.4.attn.proj', 'l_20': 'blocks.4.attn.proj_drop', 'l_21': 'blocks.4.drop_path', 'l_22': 'blocks.4.norm2', 'l_23': 'blocks.4.mlp.fc1', 'l_24': 'blocks.4.mlp.act', 'l_25': 'blocks.4.mlp.drop', 'l_26': 'blocks.4.mlp.fc2', 'l_27': 'blocks.4.mlp.drop', 'l_28': 'blocks.4.drop_path', 'l_29': 'blocks.5.norm1', 'l_30': 'blocks.5.attn.qkv', 'l_31': 'blocks.5.attn.attn_drop', 'l_32': 'blocks.5.attn.proj', 'l_33': 'blocks.5.attn.proj_drop', 'l_34': 'blocks.5.drop_path', 'l_35': 'blocks.5.norm2', 'l_36': 'blocks.5.mlp.fc1', 'l_37': 'blocks.5.mlp.act'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_4(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_5(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = self.l_8(t_2) t_2 = (t_0 + t_2) t_0 = self.l_9(t_2) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = (t_2 + t_0) t_2 = self.l_16(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_17(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_18(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = self.l_21(t_4) t_4 = (t_0 + t_4) t_0 = self.l_22(t_4) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = (t_4 + t_0) t_4 = self.l_29(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_30(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_31(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = self.l_34(t_5) t_5 = (t_0 + t_5) t_0 = self.l_35(t_5) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.5.mlp.drop', 'l_1': 'blocks.5.mlp.fc2', 'l_2': 'blocks.5.mlp.drop', 'l_3': 'blocks.5.drop_path', 'l_4': 'blocks.6.norm1', 'l_5': 'blocks.6.attn.qkv', 'l_6': 'blocks.6.attn.attn_drop', 'l_7': 'blocks.6.attn.proj', 'l_8': 'blocks.6.attn.proj_drop', 'l_9': 'blocks.6.drop_path', 'l_10': 'blocks.6.norm2', 'l_11': 'blocks.6.mlp.fc1', 'l_12': 'blocks.6.mlp.act', 'l_13': 'blocks.6.mlp.drop', 'l_14': 'blocks.6.mlp.fc2', 'l_15': 'blocks.6.mlp.drop', 'l_16': 'blocks.6.drop_path', 'l_17': 'blocks.7.norm1', 'l_18': 'blocks.7.attn.qkv', 'l_19': 'blocks.7.attn.attn_drop', 'l_20': 'blocks.7.attn.proj', 'l_21': 'blocks.7.attn.proj_drop', 'l_22': 'blocks.7.drop_path', 'l_23': 'blocks.7.norm2', 'l_24': 'blocks.7.mlp.fc1', 'l_25': 'blocks.7.mlp.act', 'l_26': 'blocks.7.mlp.drop', 'l_27': 'blocks.7.mlp.fc2', 'l_28': 'blocks.7.mlp.drop', 'l_29': 'blocks.7.drop_path', 'l_30': 'blocks.8.norm1', 'l_31': 'blocks.8.attn.qkv', 'l_32': 'blocks.8.attn.attn_drop', 'l_33': 'blocks.8.attn.proj', 'l_34': 'blocks.8.attn.proj_drop', 'l_35': 'blocks.8.drop_path', 'l_36': 'blocks.8.norm2', 'l_37': 'blocks.8.mlp.fc1'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = (x0 + t_0) t_1 = self.l_4(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_5(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_6(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_7(t_2) t_2 = self.l_8(t_2) t_2 = self.l_9(t_2) t_2 = (t_0 + t_2) t_0 = self.l_10(t_2) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_2 + t_0) t_2 = self.l_17(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_18(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_19(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_20(t_4) t_4 = self.l_21(t_4) t_4 = self.l_22(t_4) t_4 = (t_0 + t_4) t_0 = self.l_23(t_4) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = self.l_29(t_0) t_0 = (t_4 + t_0) t_4 = self.l_30(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_31(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_32(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_33(t_5) t_5 = self.l_34(t_5) t_5 = self.l_35(t_5) t_5 = (t_0 + t_5) t_0 = self.l_36(t_5) t_0 = self.l_37(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.8.mlp.act', 'l_1': 'blocks.8.mlp.drop', 'l_2': 'blocks.8.mlp.fc2', 'l_3': 'blocks.8.mlp.drop', 'l_4': 'blocks.8.drop_path', 'l_5': 'blocks.9.norm1', 'l_6': 'blocks.9.attn.qkv', 'l_7': 'blocks.9.attn.attn_drop', 'l_8': 'blocks.9.attn.proj', 'l_9': 'blocks.9.attn.proj_drop', 'l_10': 'blocks.9.drop_path', 'l_11': 'blocks.9.norm2', 'l_12': 'blocks.9.mlp.fc1', 'l_13': 'blocks.9.mlp.act', 'l_14': 'blocks.9.mlp.drop', 'l_15': 'blocks.9.mlp.fc2', 'l_16': 'blocks.9.mlp.drop', 'l_17': 'blocks.9.drop_path', 'l_18': 'blocks.10.norm1', 'l_19': 'blocks.10.attn.qkv', 'l_20': 'blocks.10.attn.attn_drop', 'l_21': 'blocks.10.attn.proj', 'l_22': 'blocks.10.attn.proj_drop', 'l_23': 'blocks.10.drop_path', 'l_24': 'blocks.10.norm2', 'l_25': 'blocks.10.mlp.fc1', 'l_26': 'blocks.10.mlp.act', 'l_27': 'blocks.10.mlp.drop', 'l_28': 'blocks.10.mlp.fc2', 'l_29': 'blocks.10.mlp.drop', 'l_30': 'blocks.10.drop_path', 'l_31': 'blocks.11.norm1', 'l_32': 'blocks.11.attn.qkv', 'l_33': 'blocks.11.attn.attn_drop', 'l_34': 'blocks.11.attn.proj', 'l_35': 'blocks.11.attn.proj_drop', 'l_36': 'blocks.11.drop_path', 'l_37': 'blocks.11.norm2'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = self.l_4(t_0) t_0 = (x0 + t_0) t_1 = self.l_5(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_6(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_7(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_8(t_2) t_2 = self.l_9(t_2) t_2 = self.l_10(t_2) t_2 = (t_0 + t_2) t_0 = self.l_11(t_2) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = self.l_17(t_0) t_0 = (t_2 + t_0) t_2 = self.l_18(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_19(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_20(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_21(t_4) t_4 = self.l_22(t_4) t_4 = self.l_23(t_4) t_4 = (t_0 + t_4) t_0 = self.l_24(t_4) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = self.l_29(t_0) t_0 = self.l_30(t_0) t_0 = (t_4 + t_0) t_4 = self.l_31(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_32(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_33(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_34(t_5) t_5 = self.l_35(t_5) t_5 = self.l_36(t_5) t_5 = (t_0 + t_5) t_0 = self.l_37(t_5) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition4(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[proj]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.11.mlp.fc1', 'l_1': 'blocks.11.mlp.act', 'l_2': 'blocks.11.mlp.drop', 'l_3': 'blocks.11.mlp.fc2', 'l_4': 'blocks.11.mlp.drop', 'l_5': 'blocks.11.drop_path', 'l_6': 'blocks.12.norm1', 'l_7': 'blocks.12.attn.qkv', 'l_8': 'blocks.12.attn.attn_drop', 'l_9': 'blocks.12.attn.proj', 'l_10': 'blocks.12.attn.proj_drop', 'l_11': 'blocks.12.drop_path', 'l_12': 'blocks.12.norm2', 'l_13': 'blocks.12.mlp.fc1', 'l_14': 'blocks.12.mlp.act', 'l_15': 'blocks.12.mlp.drop', 'l_16': 'blocks.12.mlp.fc2', 'l_17': 'blocks.12.mlp.drop', 'l_18': 'blocks.12.drop_path', 'l_19': 'blocks.13.norm1', 'l_20': 'blocks.13.attn.qkv', 'l_21': 'blocks.13.attn.attn_drop', 'l_22': 'blocks.13.attn.proj', 'l_23': 'blocks.13.attn.proj_drop', 'l_24': 'blocks.13.drop_path', 'l_25': 'blocks.13.norm2', 'l_26': 'blocks.13.mlp.fc1', 'l_27': 'blocks.13.mlp.act', 'l_28': 'blocks.13.mlp.drop', 'l_29': 'blocks.13.mlp.fc2', 'l_30': 'blocks.13.mlp.drop', 'l_31': 'blocks.13.drop_path', 'l_32': 'blocks.14.norm1', 'l_33': 'blocks.14.attn.qkv', 'l_34': 'blocks.14.attn.attn_drop', 'l_35': 'blocks.14.attn.proj'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = self.l_3(t_0) t_0 = self.l_4(t_0) t_0 = self.l_5(t_0) t_0 = (x0 + t_0) t_1 = self.l_6(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_7(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_8(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_9(t_2) t_2 = self.l_10(t_2) t_2 = self.l_11(t_2) t_2 = (t_0 + t_2) t_0 = self.l_12(t_2) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = self.l_17(t_0) t_0 = self.l_18(t_0) t_0 = (t_2 + t_0) t_2 = self.l_19(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_20(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_21(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_22(t_4) t_4 = self.l_23(t_4) t_4 = self.l_24(t_4) t_4 = (t_0 + t_4) t_0 = self.l_25(t_4) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = self.l_28(t_0) t_0 = self.l_29(t_0) t_0 = self.l_30(t_0) t_0 = self.l_31(t_0) t_0 = (t_4 + t_0) t_4 = self.l_32(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_33(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_34(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_35(t_5) return list(flatten((t_0, t_5))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition5(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[attn_drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.14.attn.proj_drop', 'l_1': 'blocks.14.drop_path', 'l_2': 'blocks.14.norm2', 'l_3': 'blocks.14.mlp.fc1', 'l_4': 'blocks.14.mlp.act', 'l_5': 'blocks.14.mlp.drop', 'l_6': 'blocks.14.mlp.fc2', 'l_7': 'blocks.14.mlp.drop', 'l_8': 'blocks.14.drop_path', 'l_9': 'blocks.15.norm1', 'l_10': 'blocks.15.attn.qkv', 'l_11': 'blocks.15.attn.attn_drop', 'l_12': 'blocks.15.attn.proj', 'l_13': 'blocks.15.attn.proj_drop', 'l_14': 'blocks.15.drop_path', 'l_15': 'blocks.15.norm2', 'l_16': 'blocks.15.mlp.fc1', 'l_17': 'blocks.15.mlp.act', 'l_18': 'blocks.15.mlp.drop', 'l_19': 'blocks.15.mlp.fc2', 'l_20': 'blocks.15.mlp.drop', 'l_21': 'blocks.15.drop_path', 'l_22': 'blocks.16.norm1', 'l_23': 'blocks.16.attn.qkv', 'l_24': 'blocks.16.attn.attn_drop', 'l_25': 'blocks.16.attn.proj', 'l_26': 'blocks.16.attn.proj_drop', 'l_27': 'blocks.16.drop_path', 'l_28': 'blocks.16.norm2', 'l_29': 'blocks.16.mlp.fc1', 'l_30': 'blocks.16.mlp.act', 'l_31': 'blocks.16.mlp.drop', 'l_32': 'blocks.16.mlp.fc2', 'l_33': 'blocks.16.mlp.drop', 'l_34': 'blocks.16.drop_path', 'l_35': 'blocks.17.norm1', 'l_36': 'blocks.17.attn.qkv', 'l_37': 'blocks.17.attn.attn_drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = (t_0 + t_1) t_0 = self.l_9(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_10(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_11(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_12(t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = (t_1 + t_2) t_1 = self.l_15(t_2) t_1 = self.l_16(t_1) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = (t_2 + t_1) t_2 = self.l_22(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_23(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_24(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_25(t_4) t_4 = self.l_26(t_4) t_4 = self.l_27(t_4) t_4 = (t_1 + t_4) t_1 = self.l_28(t_4) t_1 = self.l_29(t_1) t_1 = self.l_30(t_1) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = (t_4 + t_1) t_4 = self.l_35(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_36(t_4) t_0 = (t_5 // 16) t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_4 = t_0[0] t_2 = t_0[1] t_0 = t_0[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_37(t_2) t_0 = (t_2 @ t_0) return list(flatten((t_1, t_3, t_6, t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition6(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1] self.lookup = {'l_0': 'blocks.17.attn.proj', 'l_1': 'blocks.17.attn.proj_drop', 'l_2': 'blocks.17.drop_path', 'l_3': 'blocks.17.norm2', 'l_4': 'blocks.17.mlp.fc1', 'l_5': 'blocks.17.mlp.act', 'l_6': 'blocks.17.mlp.drop', 'l_7': 'blocks.17.mlp.fc2', 'l_8': 'blocks.17.mlp.drop', 'l_9': 'blocks.17.drop_path', 'l_10': 'blocks.18.norm1', 'l_11': 'blocks.18.attn.qkv', 'l_12': 'blocks.18.attn.attn_drop', 'l_13': 'blocks.18.attn.proj', 'l_14': 'blocks.18.attn.proj_drop', 'l_15': 'blocks.18.drop_path', 'l_16': 'blocks.18.norm2', 'l_17': 'blocks.18.mlp.fc1', 'l_18': 'blocks.18.mlp.act', 'l_19': 'blocks.18.mlp.drop', 'l_20': 'blocks.18.mlp.fc2', 'l_21': 'blocks.18.mlp.drop', 'l_22': 'blocks.18.drop_path', 'l_23': 'blocks.19.norm1', 'l_24': 'blocks.19.attn.qkv', 'l_25': 'blocks.19.attn.attn_drop', 'l_26': 'blocks.19.attn.proj', 'l_27': 'blocks.19.attn.proj_drop', 'l_28': 'blocks.19.drop_path', 'l_29': 'blocks.19.norm2', 'l_30': 'blocks.19.mlp.fc1', 'l_31': 'blocks.19.mlp.act', 'l_32': 'blocks.19.mlp.drop', 'l_33': 'blocks.19.mlp.fc2', 'l_34': 'blocks.19.mlp.drop', 'l_35': 'blocks.19.drop_path', 'l_36': 'blocks.20.norm1', 'l_37': 'blocks.20.attn.qkv'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = x4.transpose(1, 2) t_0 = t_0.reshape(x1, x2, x3) t_0 = self.l_0(t_0) t_0 = self.l_1(t_0) t_0 = self.l_2(t_0) t_0 = (x0 + t_0) t_1 = self.l_3(t_0) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = (t_0 + t_1) t_0 = self.l_10(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_11(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_12(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_13(t_2) t_2 = self.l_14(t_2) t_2 = self.l_15(t_2) t_2 = (t_1 + t_2) t_1 = self.l_16(t_2) t_1 = self.l_17(t_1) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = (t_2 + t_1) t_2 = self.l_23(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_24(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_25(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_26(t_4) t_4 = self.l_27(t_4) t_4 = self.l_28(t_4) t_4 = (t_1 + t_4) t_1 = self.l_29(t_4) t_1 = self.l_30(t_1) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = (t_4 + t_1) t_4 = self.l_36(t_1) t_5 = t_4.shape t_4 = self.l_37(t_4) return list(flatten((t_1, t_5, t_4))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition7(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/LayerNorm[norm]', 'VisionTransformer/Linear[head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'blocks.20.attn.attn_drop', 'l_1': 'blocks.20.attn.proj', 'l_2': 'blocks.20.attn.proj_drop', 'l_3': 'blocks.20.drop_path', 'l_4': 'blocks.20.norm2', 'l_5': 'blocks.20.mlp.fc1', 'l_6': 'blocks.20.mlp.act', 'l_7': 'blocks.20.mlp.drop', 'l_8': 'blocks.20.mlp.fc2', 'l_9': 'blocks.20.mlp.drop', 'l_10': 'blocks.20.drop_path', 'l_11': 'blocks.21.norm1', 'l_12': 'blocks.21.attn.qkv', 'l_13': 'blocks.21.attn.attn_drop', 'l_14': 'blocks.21.attn.proj', 'l_15': 'blocks.21.attn.proj_drop', 'l_16': 'blocks.21.drop_path', 'l_17': 'blocks.21.norm2', 'l_18': 'blocks.21.mlp.fc1', 'l_19': 'blocks.21.mlp.act', 'l_20': 'blocks.21.mlp.drop', 'l_21': 'blocks.21.mlp.fc2', 'l_22': 'blocks.21.mlp.drop', 'l_23': 'blocks.21.drop_path', 'l_24': 'blocks.22.norm1', 'l_25': 'blocks.22.attn.qkv', 'l_26': 'blocks.22.attn.attn_drop', 'l_27': 'blocks.22.attn.proj', 'l_28': 'blocks.22.attn.proj_drop', 'l_29': 'blocks.22.drop_path', 'l_30': 'blocks.22.norm2', 'l_31': 'blocks.22.mlp.fc1', 'l_32': 'blocks.22.mlp.act', 'l_33': 'blocks.22.mlp.drop', 'l_34': 'blocks.22.mlp.fc2', 'l_35': 'blocks.22.mlp.drop', 'l_36': 'blocks.22.drop_path', 'l_37': 'blocks.23.norm1', 'l_38': 'blocks.23.attn.qkv', 'l_39': 'blocks.23.attn.attn_drop', 'l_40': 'blocks.23.attn.proj', 'l_41': 'blocks.23.attn.proj_drop', 'l_42': 'blocks.23.drop_path', 'l_43': 'blocks.23.norm2', 'l_44': 'blocks.23.mlp.fc1', 'l_45': 'blocks.23.mlp.act', 'l_46': 'blocks.23.mlp.drop', 'l_47': 'blocks.23.mlp.fc2', 'l_48': 'blocks.23.mlp.drop', 'l_49': 'blocks.23.drop_path', 'l_50': 'norm', 'l_51': 'head'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = x1[0] t_1 = x1[1] t_2 = x1[2] t_3 = (t_2 // 16) t_3 = x2.reshape(t_0, t_1, 3, 16, t_3) t_3 = t_3.permute(2, 0, 3, 1, 4) t_4 = t_3[0] t_5 = t_3[1] t_3 = t_3[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_4 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_0(t_5) t_3 = (t_5 @ t_3) t_3 = t_3.transpose(1, 2) t_2 = t_3.reshape(t_0, t_1, t_2) t_2 = self.l_1(t_2) t_2 = self.l_2(t_2) t_2 = self.l_3(t_2) t_2 = (x0 + t_2) t_1 = self.l_4(t_2) t_1 = self.l_5(t_1) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = (t_2 + t_1) t_2 = self.l_11(t_1) t_0 = t_2.shape t_3 = t_0[0] t_5 = t_0[1] t_0 = t_0[2] t_2 = self.l_12(t_2) t_4 = (t_0 // 16) t_4 = t_2.reshape(t_3, t_5, 3, 16, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_2 = t_4[0] t_6 = t_4[1] t_4 = t_4[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_2 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_13(t_6) t_4 = (t_6 @ t_4) t_4 = t_4.transpose(1, 2) t_0 = t_4.reshape(t_3, t_5, t_0) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_1 = self.l_17(t_0) t_1 = self.l_18(t_1) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = (t_0 + t_1) t_0 = self.l_24(t_1) t_5 = t_0.shape t_3 = t_5[0] t_4 = t_5[1] t_5 = t_5[2] t_0 = self.l_25(t_0) t_6 = (t_5 // 16) t_6 = t_0.reshape(t_3, t_4, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_0 = t_6[0] t_2 = t_6[1] t_6 = t_6[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_0 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_26(t_2) t_6 = (t_2 @ t_6) t_6 = t_6.transpose(1, 2) t_5 = t_6.reshape(t_3, t_4, t_5) t_5 = self.l_27(t_5) t_5 = self.l_28(t_5) t_5 = self.l_29(t_5) t_5 = (t_1 + t_5) t_1 = self.l_30(t_5) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = (t_5 + t_1) t_5 = self.l_37(t_1) t_4 = t_5.shape t_3 = t_4[0] t_6 = t_4[1] t_4 = t_4[2] t_5 = self.l_38(t_5) t_2 = (t_4 // 16) t_2 = t_5.reshape(t_3, t_6, 3, 16, t_2) t_2 = t_2.permute(2, 0, 3, 1, 4) t_5 = t_2[0] t_0 = t_2[1] t_2 = t_2[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_5 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_39(t_0) t_2 = (t_0 @ t_2) t_2 = t_2.transpose(1, 2) t_4 = t_2.reshape(t_3, t_6, t_4) t_4 = self.l_40(t_4) t_4 = self.l_41(t_4) t_4 = self.l_42(t_4) t_4 = (t_1 + t_4) t_1 = self.l_43(t_4) t_1 = self.l_44(t_1) t_1 = self.l_45(t_1) t_1 = self.l_46(t_1) t_1 = self.l_47(t_1) t_1 = self.l_48(t_1) t_1 = self.l_49(t_1) t_1 = (t_4 + t_1) t_1 = self.l_50(t_1) t_1 = t_1[(slice(None, None, None), 0)] t_1 = self.l_51(t_1) return (t_1,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module)]]: '\n iterate over model layers yielding the layer,layer_scope,encasing_module\n Parameters:\n -----------\n model:\n the model to iterate over\n depth:\n how far down in the model tree to go\n basic_blocks:\n a list of modules that if encountered will not be broken down\n full:\n whether to yield only layers specified by the depth and basick_block options or to yield all layers\n ' if (prefix is None): prefix = type(module).__name__ for (name, sub_module) in module.named_children(): scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]') if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)): if full: (yield (sub_module, scope, module, True)) else: (yield (sub_module, scope, module)) else: if full: (yield (sub_module, scope, module, False)) (yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]: return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]: "\n iterate over model's buffers and parameters yielding obj,obj_scope\n\n Parameters:\n -----------\n model:\n the model to iterate over\n " if (prefix is None): prefix = type(module).__name__ for (param_name, param) in module.named_parameters(recurse=False): param_scope = f'{prefix}/{type(param).__name__}[{param_name}]' (yield (param, param_scope)) for (buffer_name, buffer) in module.named_buffers(recurse=False): buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]' (yield (buffer, buffer_scope)) for (name, sub_module) in module.named_children(): (yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]: return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
def move_tensors(ts, device): def move(t): if isinstance(t, (nn.Module, Tensor)): return t.to(device) return t return nested_map(move, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts): if isinstance(ts, torch.Size): (yield ts) elif isinstance(ts, (list, tuple, set)): (yield from chain(*[flatten(t) for t in ts])) elif isinstance(ts, dict): (yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))])) else: (yield ts)
def unflatten(xs, structure): return _unflatten(xs, structure)[0]
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs): state = nn.Module.state_dict(partition, *args, **kwargs) lookup = partition.lookup result = dict() for (k, v) in state.items(): if (k in lookup): result[lookup[k]] = v else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) result[new_k] = v return result
def load_state_dict(partition, state): reverse_lookup = {v: k for (k, v) in partition.lookup.items()} device = partition.device keys = list(partition.state_dict(None).keys()) new_state = dict() for k in keys: if (k in reverse_lookup): new_state[reverse_lookup[k]] = state[k].to(device) continue idx = k.rfind('.') to_replace = k[:idx] if (to_replace in reverse_lookup): key = (reverse_lookup[to_replace] + k[idx:]) new_state[key] = state[k].to(device) nn.Module.load_state_dict(partition, new_state, strict=True)
def named_buffers(partition, recurse=True): params = nn.Module.named_buffers(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def named_parameters(partition, recurse=True): params = nn.Module.named_parameters(partition, recurse=recurse) lookup = partition.lookup for (k, v) in params: if (k in lookup): (yield (lookup[k], v)) else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) (yield (new_k, v))
def cpu(partition): partition.device = torch.device('cpu') return nn.Module.cpu(partition)
def cuda(partition, device=None): if (device is None): device = torch.cuda.current_device() partition.device = torch.device(device) return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs): device = None if ('device' in kwargs): device = kwargs['device'] elif ('tensor' in kwargs): device = kwargs['tensor'].device if args: if isinstance(args[0], (torch.device, int, str)): device = args[0] if torch.is_tensor(args[0]): device = args[0].device if (not (device is None)): partition.device = torch.device(device) return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=32): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Identity, LayerNorm, Conv2d, GELU, Linear, Dropout), 'model_inputs': {'input0': {'shape': torch.Size([32, 3, 384, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([32, 1000]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'input0': {'shape': torch.Size([32, 3, 384, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[2]/Tensor::__add___171': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___338': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[5]/Tensor::__add___338': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[8]/Tensor::__add___489': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[11]/Tensor::__add___648': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[14]/Tensor::__add___815': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[14]/Tensor::__add___815': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[17]/Tensor::__add___974': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[17]/Tensor::__add___974': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'VisionTransformer/ModuleList[blocks]/Block[20]/Tensor::__add___1133': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm1]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Tensor::__getattribute___1136': {'shape': torch.Size([3]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'VisionTransformer/ModuleList[blocks]/Block[20]/Tensor::__add___1133': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm1]': {'shape': torch.Size([32, 145, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Tensor::__getattribute___1136': {'shape': torch.Size([3]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'created_by': 6}}, 'outputs': {'VisionTransformer/Linear[head]': {'shape': torch.Size([32, 1000]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
class Partition0(nn.Module): LAYER_SCOPES = ['VisionTransformer/PatchEmbed[patch_embed]/Conv2d[proj]', 'VisionTransformer/Dropout[pos_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]'] TENSORS = ['VisionTransformer/Parameter[cls_token]', 'VisionTransformer/Parameter[pos_embed]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'patch_embed.proj', 'l_1': 'pos_drop', 'l_2': 'blocks.0.norm1', 'l_3': 'blocks.0.attn.qkv', 'l_4': 'blocks.0.attn.attn_drop', 'l_5': 'blocks.0.attn.proj', 'l_6': 'blocks.0.attn.proj_drop', 'l_7': 'blocks.0.drop_path', 'l_8': 'blocks.0.norm2', 'l_9': 'blocks.0.mlp.fc1', 'l_10': 'blocks.0.mlp.act', 'l_11': 'blocks.0.mlp.drop', 'l_12': 'blocks.0.mlp.fc2', 'l_13': 'blocks.0.mlp.drop', 'l_14': 'blocks.0.drop_path', 'l_15': 'blocks.1.norm1', 'l_16': 'blocks.1.attn.qkv', 'l_17': 'blocks.1.attn.attn_drop', 'l_18': 'blocks.1.attn.proj', 'l_19': 'blocks.1.attn.proj_drop', 'l_20': 'blocks.1.drop_path', 'l_21': 'blocks.1.norm2', 'l_22': 'blocks.1.mlp.fc1', 'l_23': 'blocks.1.mlp.act', 'l_24': 'blocks.1.mlp.drop', 'l_25': 'blocks.1.mlp.fc2', 'l_26': 'blocks.1.mlp.drop', 'l_27': 'blocks.1.drop_path', 'l_28': 'blocks.2.norm1', 'l_29': 'blocks.2.attn.qkv', 'l_30': 'blocks.2.attn.attn_drop', 'l_31': 'blocks.2.attn.proj', 'l_32': 'blocks.2.attn.proj_drop', 'l_33': 'blocks.2.drop_path', 'l_34': 'blocks.2.norm2', 'l_35': 'blocks.2.mlp.fc1', 'l_36': 'blocks.2.mlp.act', 'l_37': 'blocks.2.mlp.drop', 'l_38': 'blocks.2.mlp.fc2', 'p_0': 'cls_token', 'p_1': 'pos_embed'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = x0.shape t_0 = t_0[0] t_1 = self.l_0(x0) t_1 = t_1.flatten(2) t_1 = t_1.transpose(1, 2) t_0 = self.p_0.expand(t_0, (- 1), (- 1)) t_1 = (t_0, t_1) t_1 = torch.cat(t_1, dim=1) t_1 = (t_1 + self.p_1) t_1 = self.l_1(t_1) t_0 = self.l_2(t_1) t_2 = t_0.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_0 = self.l_3(t_0) t_5 = (t_2 // 16) t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_0 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_0 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_1 + t_2) t_1 = self.l_8(t_2) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = self.l_11(t_1) t_1 = self.l_12(t_1) t_1 = self.l_13(t_1) t_1 = self.l_14(t_1) t_1 = (t_2 + t_1) t_2 = self.l_15(t_1) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_0 = t_6[1] t_6 = t_6[2] t_0 = t_0.transpose((- 2), (- 1)) t_0 = (t_2 @ t_0) t_0 = (t_0 * 0.125) t_0 = t_0.softmax(dim=(- 1)) t_0 = self.l_17(t_0) t_6 = (t_0 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_1 + t_4) t_1 = self.l_21(t_4) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = self.l_24(t_1) t_1 = self.l_25(t_1) t_1 = self.l_26(t_1) t_1 = self.l_27(t_1) t_1 = (t_4 + t_1) t_4 = self.l_28(t_1) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_0 = (t_5 // 16) t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_4 = t_0[0] t_2 = t_0[1] t_0 = t_0[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_0 = (t_2 @ t_0) t_0 = t_0.transpose(1, 2) t_5 = t_0.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_1 + t_5) t_1 = self.l_34(t_5) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = self.l_37(t_1) t_1 = self.l_38(t_1) return list(flatten((t_5, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.2.mlp.drop', 'l_1': 'blocks.2.drop_path', 'l_2': 'blocks.3.norm1', 'l_3': 'blocks.3.attn.qkv', 'l_4': 'blocks.3.attn.attn_drop', 'l_5': 'blocks.3.attn.proj', 'l_6': 'blocks.3.attn.proj_drop', 'l_7': 'blocks.3.drop_path', 'l_8': 'blocks.3.norm2', 'l_9': 'blocks.3.mlp.fc1', 'l_10': 'blocks.3.mlp.act', 'l_11': 'blocks.3.mlp.drop', 'l_12': 'blocks.3.mlp.fc2', 'l_13': 'blocks.3.mlp.drop', 'l_14': 'blocks.3.drop_path', 'l_15': 'blocks.4.norm1', 'l_16': 'blocks.4.attn.qkv', 'l_17': 'blocks.4.attn.attn_drop', 'l_18': 'blocks.4.attn.proj', 'l_19': 'blocks.4.attn.proj_drop', 'l_20': 'blocks.4.drop_path', 'l_21': 'blocks.4.norm2', 'l_22': 'blocks.4.mlp.fc1', 'l_23': 'blocks.4.mlp.act', 'l_24': 'blocks.4.mlp.drop', 'l_25': 'blocks.4.mlp.fc2', 'l_26': 'blocks.4.mlp.drop', 'l_27': 'blocks.4.drop_path', 'l_28': 'blocks.5.norm1', 'l_29': 'blocks.5.attn.qkv', 'l_30': 'blocks.5.attn.attn_drop', 'l_31': 'blocks.5.attn.proj', 'l_32': 'blocks.5.attn.proj_drop', 'l_33': 'blocks.5.drop_path', 'l_34': 'blocks.5.norm2', 'l_35': 'blocks.5.mlp.fc1', 'l_36': 'blocks.5.mlp.act', 'l_37': 'blocks.5.mlp.drop', 'l_38': 'blocks.5.mlp.fc2', 'l_39': 'blocks.5.mlp.drop', 'l_40': 'blocks.5.drop_path'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_0 + t_4) t_0 = self.l_21(t_4) t_0 = self.l_22(t_0) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = (t_4 + t_0) t_4 = self.l_28(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_0 + t_5) t_0 = self.l_34(t_5) t_0 = self.l_35(t_0) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) t_0 = self.l_38(t_0) t_0 = self.l_39(t_0) t_0 = self.l_40(t_0) t_0 = (t_5 + t_0) return (t_0,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[6]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[6]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[7]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[7]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[8]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Linear[fc2]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'blocks.6.norm1', 'l_1': 'blocks.6.attn.qkv', 'l_2': 'blocks.6.attn.attn_drop', 'l_3': 'blocks.6.attn.proj', 'l_4': 'blocks.6.attn.proj_drop', 'l_5': 'blocks.6.drop_path', 'l_6': 'blocks.6.norm2', 'l_7': 'blocks.6.mlp.fc1', 'l_8': 'blocks.6.mlp.act', 'l_9': 'blocks.6.mlp.drop', 'l_10': 'blocks.6.mlp.fc2', 'l_11': 'blocks.6.mlp.drop', 'l_12': 'blocks.6.drop_path', 'l_13': 'blocks.7.norm1', 'l_14': 'blocks.7.attn.qkv', 'l_15': 'blocks.7.attn.attn_drop', 'l_16': 'blocks.7.attn.proj', 'l_17': 'blocks.7.attn.proj_drop', 'l_18': 'blocks.7.drop_path', 'l_19': 'blocks.7.norm2', 'l_20': 'blocks.7.mlp.fc1', 'l_21': 'blocks.7.mlp.act', 'l_22': 'blocks.7.mlp.drop', 'l_23': 'blocks.7.mlp.fc2', 'l_24': 'blocks.7.mlp.drop', 'l_25': 'blocks.7.drop_path', 'l_26': 'blocks.8.norm1', 'l_27': 'blocks.8.attn.qkv', 'l_28': 'blocks.8.attn.attn_drop', 'l_29': 'blocks.8.attn.proj', 'l_30': 'blocks.8.attn.proj_drop', 'l_31': 'blocks.8.drop_path', 'l_32': 'blocks.8.norm2', 'l_33': 'blocks.8.mlp.fc1', 'l_34': 'blocks.8.mlp.act', 'l_35': 'blocks.8.mlp.drop', 'l_36': 'blocks.8.mlp.fc2'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = self.l_0(x0) t_1 = t_0.shape t_2 = t_1[0] t_3 = t_1[1] t_1 = t_1[2] t_0 = self.l_1(t_0) t_4 = (t_1 // 16) t_4 = t_0.reshape(t_2, t_3, 3, 16, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_0 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_0 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_2(t_5) t_4 = (t_5 @ t_4) t_4 = t_4.transpose(1, 2) t_1 = t_4.reshape(t_2, t_3, t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = (x0 + t_1) t_3 = self.l_6(t_1) t_3 = self.l_7(t_3) t_3 = self.l_8(t_3) t_3 = self.l_9(t_3) t_3 = self.l_10(t_3) t_3 = self.l_11(t_3) t_3 = self.l_12(t_3) t_3 = (t_1 + t_3) t_1 = self.l_13(t_3) t_2 = t_1.shape t_4 = t_2[0] t_5 = t_2[1] t_2 = t_2[2] t_1 = self.l_14(t_1) t_0 = (t_2 // 16) t_0 = t_1.reshape(t_4, t_5, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_1 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_15(t_6) t_0 = (t_6 @ t_0) t_0 = t_0.transpose(1, 2) t_2 = t_0.reshape(t_4, t_5, t_2) t_2 = self.l_16(t_2) t_2 = self.l_17(t_2) t_2 = self.l_18(t_2) t_2 = (t_3 + t_2) t_3 = self.l_19(t_2) t_3 = self.l_20(t_3) t_3 = self.l_21(t_3) t_3 = self.l_22(t_3) t_3 = self.l_23(t_3) t_3 = self.l_24(t_3) t_3 = self.l_25(t_3) t_3 = (t_2 + t_3) t_2 = self.l_26(t_3) t_5 = t_2.shape t_4 = t_5[0] t_0 = t_5[1] t_5 = t_5[2] t_2 = self.l_27(t_2) t_6 = (t_5 // 16) t_6 = t_2.reshape(t_4, t_0, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_28(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_5 = t_6.reshape(t_4, t_0, t_5) t_5 = self.l_29(t_5) t_5 = self.l_30(t_5) t_5 = self.l_31(t_5) t_5 = (t_3 + t_5) t_3 = self.l_32(t_5) t_3 = self.l_33(t_3) t_3 = self.l_34(t_3) t_3 = self.l_35(t_3) t_3 = self.l_36(t_3) return list(flatten((t_5, t_3))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[8]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[8]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[9]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[9]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[10]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[10]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[11]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[11]/Mlp[mlp]/Dropout[drop]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.8.mlp.drop', 'l_1': 'blocks.8.drop_path', 'l_2': 'blocks.9.norm1', 'l_3': 'blocks.9.attn.qkv', 'l_4': 'blocks.9.attn.attn_drop', 'l_5': 'blocks.9.attn.proj', 'l_6': 'blocks.9.attn.proj_drop', 'l_7': 'blocks.9.drop_path', 'l_8': 'blocks.9.norm2', 'l_9': 'blocks.9.mlp.fc1', 'l_10': 'blocks.9.mlp.act', 'l_11': 'blocks.9.mlp.drop', 'l_12': 'blocks.9.mlp.fc2', 'l_13': 'blocks.9.mlp.drop', 'l_14': 'blocks.9.drop_path', 'l_15': 'blocks.10.norm1', 'l_16': 'blocks.10.attn.qkv', 'l_17': 'blocks.10.attn.attn_drop', 'l_18': 'blocks.10.attn.proj', 'l_19': 'blocks.10.attn.proj_drop', 'l_20': 'blocks.10.drop_path', 'l_21': 'blocks.10.norm2', 'l_22': 'blocks.10.mlp.fc1', 'l_23': 'blocks.10.mlp.act', 'l_24': 'blocks.10.mlp.drop', 'l_25': 'blocks.10.mlp.fc2', 'l_26': 'blocks.10.mlp.drop', 'l_27': 'blocks.10.drop_path', 'l_28': 'blocks.11.norm1', 'l_29': 'blocks.11.attn.qkv', 'l_30': 'blocks.11.attn.attn_drop', 'l_31': 'blocks.11.attn.proj', 'l_32': 'blocks.11.attn.proj_drop', 'l_33': 'blocks.11.drop_path', 'l_34': 'blocks.11.norm2', 'l_35': 'blocks.11.mlp.fc1', 'l_36': 'blocks.11.mlp.act', 'l_37': 'blocks.11.mlp.drop', 'l_38': 'blocks.11.mlp.fc2', 'l_39': 'blocks.11.mlp.drop'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = self.l_1(t_0) t_0 = (x0 + t_0) t_1 = self.l_2(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_3(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_4(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = self.l_7(t_2) t_2 = (t_0 + t_2) t_0 = self.l_8(t_2) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_2 + t_0) t_2 = self.l_15(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_16(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_17(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = self.l_20(t_4) t_4 = (t_0 + t_4) t_0 = self.l_21(t_4) t_0 = self.l_22(t_0) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = self.l_27(t_0) t_0 = (t_4 + t_0) t_4 = self.l_28(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_29(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_30(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = self.l_33(t_5) t_5 = (t_0 + t_5) t_0 = self.l_34(t_5) t_0 = self.l_35(t_0) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) t_0 = self.l_38(t_0) t_0 = self.l_39(t_0) return list(flatten((t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition4(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[11]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[12]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[12]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[13]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[13]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[14]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[14]/Identity[drop_path]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'blocks.11.drop_path', 'l_1': 'blocks.12.norm1', 'l_2': 'blocks.12.attn.qkv', 'l_3': 'blocks.12.attn.attn_drop', 'l_4': 'blocks.12.attn.proj', 'l_5': 'blocks.12.attn.proj_drop', 'l_6': 'blocks.12.drop_path', 'l_7': 'blocks.12.norm2', 'l_8': 'blocks.12.mlp.fc1', 'l_9': 'blocks.12.mlp.act', 'l_10': 'blocks.12.mlp.drop', 'l_11': 'blocks.12.mlp.fc2', 'l_12': 'blocks.12.mlp.drop', 'l_13': 'blocks.12.drop_path', 'l_14': 'blocks.13.norm1', 'l_15': 'blocks.13.attn.qkv', 'l_16': 'blocks.13.attn.attn_drop', 'l_17': 'blocks.13.attn.proj', 'l_18': 'blocks.13.attn.proj_drop', 'l_19': 'blocks.13.drop_path', 'l_20': 'blocks.13.norm2', 'l_21': 'blocks.13.mlp.fc1', 'l_22': 'blocks.13.mlp.act', 'l_23': 'blocks.13.mlp.drop', 'l_24': 'blocks.13.mlp.fc2', 'l_25': 'blocks.13.mlp.drop', 'l_26': 'blocks.13.drop_path', 'l_27': 'blocks.14.norm1', 'l_28': 'blocks.14.attn.qkv', 'l_29': 'blocks.14.attn.attn_drop', 'l_30': 'blocks.14.attn.proj', 'l_31': 'blocks.14.attn.proj_drop', 'l_32': 'blocks.14.drop_path', 'l_33': 'blocks.14.norm2', 'l_34': 'blocks.14.mlp.fc1', 'l_35': 'blocks.14.mlp.act', 'l_36': 'blocks.14.mlp.drop', 'l_37': 'blocks.14.mlp.fc2', 'l_38': 'blocks.14.mlp.drop', 'l_39': 'blocks.14.drop_path'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_0 = (x0 + t_0) t_1 = self.l_1(t_0) t_2 = t_1.shape t_3 = t_2[0] t_4 = t_2[1] t_2 = t_2[2] t_1 = self.l_2(t_1) t_5 = (t_2 // 16) t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5) t_5 = t_5.permute(2, 0, 3, 1, 4) t_1 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_3(t_6) t_5 = (t_6 @ t_5) t_5 = t_5.transpose(1, 2) t_2 = t_5.reshape(t_3, t_4, t_2) t_2 = self.l_4(t_2) t_2 = self.l_5(t_2) t_2 = self.l_6(t_2) t_2 = (t_0 + t_2) t_0 = self.l_7(t_2) t_0 = self.l_8(t_0) t_0 = self.l_9(t_0) t_0 = self.l_10(t_0) t_0 = self.l_11(t_0) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = (t_2 + t_0) t_2 = self.l_14(t_0) t_4 = t_2.shape t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_2 = self.l_15(t_2) t_6 = (t_4 // 16) t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_16(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_4 = t_6.reshape(t_3, t_5, t_4) t_4 = self.l_17(t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_4 = (t_0 + t_4) t_0 = self.l_20(t_4) t_0 = self.l_21(t_0) t_0 = self.l_22(t_0) t_0 = self.l_23(t_0) t_0 = self.l_24(t_0) t_0 = self.l_25(t_0) t_0 = self.l_26(t_0) t_0 = (t_4 + t_0) t_4 = self.l_27(t_0) t_5 = t_4.shape t_3 = t_5[0] t_6 = t_5[1] t_5 = t_5[2] t_4 = self.l_28(t_4) t_1 = (t_5 // 16) t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1) t_1 = t_1.permute(2, 0, 3, 1, 4) t_4 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_4 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_29(t_2) t_1 = (t_2 @ t_1) t_1 = t_1.transpose(1, 2) t_5 = t_1.reshape(t_3, t_6, t_5) t_5 = self.l_30(t_5) t_5 = self.l_31(t_5) t_5 = self.l_32(t_5) t_5 = (t_0 + t_5) t_0 = self.l_33(t_5) t_0 = self.l_34(t_0) t_0 = self.l_35(t_0) t_0 = self.l_36(t_0) t_0 = self.l_37(t_0) t_0 = self.l_38(t_0) t_0 = self.l_39(t_0) t_0 = (t_5 + t_0) return (t_0,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition5(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'blocks.15.norm1', 'l_1': 'blocks.15.attn.qkv', 'l_2': 'blocks.15.attn.attn_drop', 'l_3': 'blocks.15.attn.proj', 'l_4': 'blocks.15.attn.proj_drop', 'l_5': 'blocks.15.drop_path', 'l_6': 'blocks.15.norm2', 'l_7': 'blocks.15.mlp.fc1', 'l_8': 'blocks.15.mlp.act', 'l_9': 'blocks.15.mlp.drop', 'l_10': 'blocks.15.mlp.fc2', 'l_11': 'blocks.15.mlp.drop', 'l_12': 'blocks.15.drop_path', 'l_13': 'blocks.16.norm1', 'l_14': 'blocks.16.attn.qkv', 'l_15': 'blocks.16.attn.attn_drop', 'l_16': 'blocks.16.attn.proj', 'l_17': 'blocks.16.attn.proj_drop', 'l_18': 'blocks.16.drop_path', 'l_19': 'blocks.16.norm2', 'l_20': 'blocks.16.mlp.fc1', 'l_21': 'blocks.16.mlp.act', 'l_22': 'blocks.16.mlp.drop', 'l_23': 'blocks.16.mlp.fc2', 'l_24': 'blocks.16.mlp.drop', 'l_25': 'blocks.16.drop_path', 'l_26': 'blocks.17.norm1', 'l_27': 'blocks.17.attn.qkv', 'l_28': 'blocks.17.attn.attn_drop', 'l_29': 'blocks.17.attn.proj', 'l_30': 'blocks.17.attn.proj_drop', 'l_31': 'blocks.17.drop_path', 'l_32': 'blocks.17.norm2', 'l_33': 'blocks.17.mlp.fc1', 'l_34': 'blocks.17.mlp.act', 'l_35': 'blocks.17.mlp.drop', 'l_36': 'blocks.17.mlp.fc2', 'l_37': 'blocks.17.mlp.drop', 'l_38': 'blocks.17.drop_path'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = self.l_0(x0) t_1 = t_0.shape t_2 = t_1[0] t_3 = t_1[1] t_1 = t_1[2] t_0 = self.l_1(t_0) t_4 = (t_1 // 16) t_4 = t_0.reshape(t_2, t_3, 3, 16, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_0 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_0 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_2(t_5) t_4 = (t_5 @ t_4) t_4 = t_4.transpose(1, 2) t_1 = t_4.reshape(t_2, t_3, t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = (x0 + t_1) t_3 = self.l_6(t_1) t_3 = self.l_7(t_3) t_3 = self.l_8(t_3) t_3 = self.l_9(t_3) t_3 = self.l_10(t_3) t_3 = self.l_11(t_3) t_3 = self.l_12(t_3) t_3 = (t_1 + t_3) t_1 = self.l_13(t_3) t_2 = t_1.shape t_4 = t_2[0] t_5 = t_2[1] t_2 = t_2[2] t_1 = self.l_14(t_1) t_0 = (t_2 // 16) t_0 = t_1.reshape(t_4, t_5, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_1 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_15(t_6) t_0 = (t_6 @ t_0) t_0 = t_0.transpose(1, 2) t_2 = t_0.reshape(t_4, t_5, t_2) t_2 = self.l_16(t_2) t_2 = self.l_17(t_2) t_2 = self.l_18(t_2) t_2 = (t_3 + t_2) t_3 = self.l_19(t_2) t_3 = self.l_20(t_3) t_3 = self.l_21(t_3) t_3 = self.l_22(t_3) t_3 = self.l_23(t_3) t_3 = self.l_24(t_3) t_3 = self.l_25(t_3) t_3 = (t_2 + t_3) t_2 = self.l_26(t_3) t_5 = t_2.shape t_4 = t_5[0] t_0 = t_5[1] t_5 = t_5[2] t_2 = self.l_27(t_2) t_6 = (t_5 // 16) t_6 = t_2.reshape(t_4, t_0, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_28(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_5 = t_6.reshape(t_4, t_0, t_5) t_5 = self.l_29(t_5) t_5 = self.l_30(t_5) t_5 = self.l_31(t_5) t_5 = (t_3 + t_5) t_3 = self.l_32(t_5) t_3 = self.l_33(t_3) t_3 = self.l_34(t_3) t_3 = self.l_35(t_3) t_3 = self.l_36(t_3) t_3 = self.l_37(t_3) t_3 = self.l_38(t_3) t_3 = (t_5 + t_3) return (t_3,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition6(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[20]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm1]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1] self.lookup = {'l_0': 'blocks.18.norm1', 'l_1': 'blocks.18.attn.qkv', 'l_2': 'blocks.18.attn.attn_drop', 'l_3': 'blocks.18.attn.proj', 'l_4': 'blocks.18.attn.proj_drop', 'l_5': 'blocks.18.drop_path', 'l_6': 'blocks.18.norm2', 'l_7': 'blocks.18.mlp.fc1', 'l_8': 'blocks.18.mlp.act', 'l_9': 'blocks.18.mlp.drop', 'l_10': 'blocks.18.mlp.fc2', 'l_11': 'blocks.18.mlp.drop', 'l_12': 'blocks.18.drop_path', 'l_13': 'blocks.19.norm1', 'l_14': 'blocks.19.attn.qkv', 'l_15': 'blocks.19.attn.attn_drop', 'l_16': 'blocks.19.attn.proj', 'l_17': 'blocks.19.attn.proj_drop', 'l_18': 'blocks.19.drop_path', 'l_19': 'blocks.19.norm2', 'l_20': 'blocks.19.mlp.fc1', 'l_21': 'blocks.19.mlp.act', 'l_22': 'blocks.19.mlp.drop', 'l_23': 'blocks.19.mlp.fc2', 'l_24': 'blocks.19.mlp.drop', 'l_25': 'blocks.19.drop_path', 'l_26': 'blocks.20.norm1', 'l_27': 'blocks.20.attn.qkv', 'l_28': 'blocks.20.attn.attn_drop', 'l_29': 'blocks.20.attn.proj', 'l_30': 'blocks.20.attn.proj_drop', 'l_31': 'blocks.20.drop_path', 'l_32': 'blocks.20.norm2', 'l_33': 'blocks.20.mlp.fc1', 'l_34': 'blocks.20.mlp.act', 'l_35': 'blocks.20.mlp.drop', 'l_36': 'blocks.20.mlp.fc2', 'l_37': 'blocks.20.mlp.drop', 'l_38': 'blocks.20.drop_path', 'l_39': 'blocks.21.norm1'} self.to(self.device) def forward(self, *args): x0 = unflatten(args, self.input_structure)[0] t_0 = self.l_0(x0) t_1 = t_0.shape t_2 = t_1[0] t_3 = t_1[1] t_1 = t_1[2] t_0 = self.l_1(t_0) t_4 = (t_1 // 16) t_4 = t_0.reshape(t_2, t_3, 3, 16, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_0 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_0 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_2(t_5) t_4 = (t_5 @ t_4) t_4 = t_4.transpose(1, 2) t_1 = t_4.reshape(t_2, t_3, t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_1 = self.l_5(t_1) t_1 = (x0 + t_1) t_3 = self.l_6(t_1) t_3 = self.l_7(t_3) t_3 = self.l_8(t_3) t_3 = self.l_9(t_3) t_3 = self.l_10(t_3) t_3 = self.l_11(t_3) t_3 = self.l_12(t_3) t_3 = (t_1 + t_3) t_1 = self.l_13(t_3) t_2 = t_1.shape t_4 = t_2[0] t_5 = t_2[1] t_2 = t_2[2] t_1 = self.l_14(t_1) t_0 = (t_2 // 16) t_0 = t_1.reshape(t_4, t_5, 3, 16, t_0) t_0 = t_0.permute(2, 0, 3, 1, 4) t_1 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_1 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_15(t_6) t_0 = (t_6 @ t_0) t_0 = t_0.transpose(1, 2) t_2 = t_0.reshape(t_4, t_5, t_2) t_2 = self.l_16(t_2) t_2 = self.l_17(t_2) t_2 = self.l_18(t_2) t_2 = (t_3 + t_2) t_3 = self.l_19(t_2) t_3 = self.l_20(t_3) t_3 = self.l_21(t_3) t_3 = self.l_22(t_3) t_3 = self.l_23(t_3) t_3 = self.l_24(t_3) t_3 = self.l_25(t_3) t_3 = (t_2 + t_3) t_2 = self.l_26(t_3) t_5 = t_2.shape t_4 = t_5[0] t_0 = t_5[1] t_5 = t_5[2] t_2 = self.l_27(t_2) t_6 = (t_5 // 16) t_6 = t_2.reshape(t_4, t_0, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_2 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_1 = t_1.transpose((- 2), (- 1)) t_1 = (t_2 @ t_1) t_1 = (t_1 * 0.125) t_1 = t_1.softmax(dim=(- 1)) t_1 = self.l_28(t_1) t_6 = (t_1 @ t_6) t_6 = t_6.transpose(1, 2) t_5 = t_6.reshape(t_4, t_0, t_5) t_5 = self.l_29(t_5) t_5 = self.l_30(t_5) t_5 = self.l_31(t_5) t_5 = (t_3 + t_5) t_3 = self.l_32(t_5) t_3 = self.l_33(t_3) t_3 = self.l_34(t_3) t_3 = self.l_35(t_3) t_3 = self.l_36(t_3) t_3 = self.l_37(t_3) t_3 = self.l_38(t_3) t_3 = (t_5 + t_3) t_5 = self.l_39(t_3) t_0 = t_5.shape return list(flatten((t_3, t_5, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition7(nn.Module): LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[21]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[21]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[22]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[22]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[23]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[23]/Identity[drop_path]', 'VisionTransformer/LayerNorm[norm]', 'VisionTransformer/Linear[head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'blocks.21.attn.qkv', 'l_1': 'blocks.21.attn.attn_drop', 'l_2': 'blocks.21.attn.proj', 'l_3': 'blocks.21.attn.proj_drop', 'l_4': 'blocks.21.drop_path', 'l_5': 'blocks.21.norm2', 'l_6': 'blocks.21.mlp.fc1', 'l_7': 'blocks.21.mlp.act', 'l_8': 'blocks.21.mlp.drop', 'l_9': 'blocks.21.mlp.fc2', 'l_10': 'blocks.21.mlp.drop', 'l_11': 'blocks.21.drop_path', 'l_12': 'blocks.22.norm1', 'l_13': 'blocks.22.attn.qkv', 'l_14': 'blocks.22.attn.attn_drop', 'l_15': 'blocks.22.attn.proj', 'l_16': 'blocks.22.attn.proj_drop', 'l_17': 'blocks.22.drop_path', 'l_18': 'blocks.22.norm2', 'l_19': 'blocks.22.mlp.fc1', 'l_20': 'blocks.22.mlp.act', 'l_21': 'blocks.22.mlp.drop', 'l_22': 'blocks.22.mlp.fc2', 'l_23': 'blocks.22.mlp.drop', 'l_24': 'blocks.22.drop_path', 'l_25': 'blocks.23.norm1', 'l_26': 'blocks.23.attn.qkv', 'l_27': 'blocks.23.attn.attn_drop', 'l_28': 'blocks.23.attn.proj', 'l_29': 'blocks.23.attn.proj_drop', 'l_30': 'blocks.23.drop_path', 'l_31': 'blocks.23.norm2', 'l_32': 'blocks.23.mlp.fc1', 'l_33': 'blocks.23.mlp.act', 'l_34': 'blocks.23.mlp.drop', 'l_35': 'blocks.23.mlp.fc2', 'l_36': 'blocks.23.mlp.drop', 'l_37': 'blocks.23.drop_path', 'l_38': 'norm', 'l_39': 'head'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = x2[0] t_1 = x2[1] t_2 = x2[2] t_3 = self.l_0(x1) t_4 = (t_2 // 16) t_4 = t_3.reshape(t_0, t_1, 3, 16, t_4) t_4 = t_4.permute(2, 0, 3, 1, 4) t_3 = t_4[0] t_5 = t_4[1] t_4 = t_4[2] t_5 = t_5.transpose((- 2), (- 1)) t_5 = (t_3 @ t_5) t_5 = (t_5 * 0.125) t_5 = t_5.softmax(dim=(- 1)) t_5 = self.l_1(t_5) t_4 = (t_5 @ t_4) t_4 = t_4.transpose(1, 2) t_2 = t_4.reshape(t_0, t_1, t_2) t_2 = self.l_2(t_2) t_2 = self.l_3(t_2) t_2 = self.l_4(t_2) t_2 = (x0 + t_2) t_1 = self.l_5(t_2) t_1 = self.l_6(t_1) t_1 = self.l_7(t_1) t_1 = self.l_8(t_1) t_1 = self.l_9(t_1) t_1 = self.l_10(t_1) t_1 = self.l_11(t_1) t_1 = (t_2 + t_1) t_2 = self.l_12(t_1) t_0 = t_2.shape t_4 = t_0[0] t_5 = t_0[1] t_0 = t_0[2] t_2 = self.l_13(t_2) t_3 = (t_0 // 16) t_3 = t_2.reshape(t_4, t_5, 3, 16, t_3) t_3 = t_3.permute(2, 0, 3, 1, 4) t_2 = t_3[0] t_6 = t_3[1] t_3 = t_3[2] t_6 = t_6.transpose((- 2), (- 1)) t_6 = (t_2 @ t_6) t_6 = (t_6 * 0.125) t_6 = t_6.softmax(dim=(- 1)) t_6 = self.l_14(t_6) t_3 = (t_6 @ t_3) t_3 = t_3.transpose(1, 2) t_0 = t_3.reshape(t_4, t_5, t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = self.l_17(t_0) t_0 = (t_1 + t_0) t_1 = self.l_18(t_0) t_1 = self.l_19(t_1) t_1 = self.l_20(t_1) t_1 = self.l_21(t_1) t_1 = self.l_22(t_1) t_1 = self.l_23(t_1) t_1 = self.l_24(t_1) t_1 = (t_0 + t_1) t_0 = self.l_25(t_1) t_5 = t_0.shape t_4 = t_5[0] t_3 = t_5[1] t_5 = t_5[2] t_0 = self.l_26(t_0) t_6 = (t_5 // 16) t_6 = t_0.reshape(t_4, t_3, 3, 16, t_6) t_6 = t_6.permute(2, 0, 3, 1, 4) t_0 = t_6[0] t_2 = t_6[1] t_6 = t_6[2] t_2 = t_2.transpose((- 2), (- 1)) t_2 = (t_0 @ t_2) t_2 = (t_2 * 0.125) t_2 = t_2.softmax(dim=(- 1)) t_2 = self.l_27(t_2) t_6 = (t_2 @ t_6) t_6 = t_6.transpose(1, 2) t_5 = t_6.reshape(t_4, t_3, t_5) t_5 = self.l_28(t_5) t_5 = self.l_29(t_5) t_5 = self.l_30(t_5) t_5 = (t_1 + t_5) t_1 = self.l_31(t_5) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = self.l_36(t_1) t_1 = self.l_37(t_1) t_1 = (t_5 + t_1) t_1 = self.l_38(t_1) t_1 = t_1[(slice(None, None, None), 0)] t_1 = self.l_39(t_1) return (t_1,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, state): return load_state_dict(self, state) def named_parameters(self, recurse=True): return named_parameters(self, recurse=recurse) def named_buffers(self, recurse=True): return named_buffers(self, recurse=recurse) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]: '\n iterate over model layers yielding the layer,layer_scope,encasing_module\n Parameters:\n -----------\n model:\n the model to iterate over\n depth:\n how far down in the model tree to go\n basic_blocks:\n a list of modules that if encountered will not be broken down\n full:\n whether to yield only layers specified by the depth and basic_block options or to yield all layers\n ' if (prefix is None): prefix = type(module).__name__ for (name, sub_module) in module.named_children(): scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]') if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)): if full: (yield (sub_module, scope, module, True)) else: (yield (sub_module, scope, module)) else: if full: (yield (sub_module, scope, module, False)) (yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]: return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]: "\n iterate over model's buffers and parameters yielding obj,obj_scope\n\n Parameters:\n -----------\n model:\n the model to iterate over\n " if (prefix is None): prefix = type(module).__name__ for (param_name, param) in module.named_parameters(recurse=False): param_scope = f'{prefix}/{type(param).__name__}[{param_name}]' (yield (param, param_scope)) for (buffer_name, buffer) in module.named_buffers(recurse=False): buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]' (yield (buffer, buffer_scope)) for (name, sub_module) in module.named_children(): (yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]: return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
def move_tensors(ts, device): def move(t): if isinstance(t, (nn.Module, Tensor)): return t.to(device) return t return nested_map(move, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts): if isinstance(ts, torch.Size): (yield ts) elif isinstance(ts, (list, tuple, set)): (yield from chain(*[flatten(t) for t in ts])) elif isinstance(ts, dict): (yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))])) else: (yield ts)
def unflatten(xs, structure): return _unflatten(xs, structure)[0]
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs): state = nn.Module.state_dict(partition, *args, **kwargs) lookup = partition.lookup result = dict() for (k, v) in state.items(): if (k in lookup): result[lookup[k]] = v else: assert ('.' in k) split_idx = k.find('.') new_k = (lookup[k[:split_idx]] + k[split_idx:]) result[new_k] = v return result
def load_state_dict(partition, state): reverse_lookup = {v: k for (k, v) in partition.lookup.items()} device = partition.device keys = list(partition.state_dict(None).keys()) new_state = dict() for k in keys: if (k in reverse_lookup): new_state[reverse_lookup[k]] = state[k].to(device) continue idx = k.rfind('.') to_replace = k[:idx] if (to_replace in reverse_lookup): key = (reverse_lookup[to_replace] + k[idx:]) new_state[key] = state[k].to(device) nn.Module.load_state_dict(partition, new_state, strict=True)