code
stringlengths
17
6.64M
def cuda(partition, device=None):
    """Move *partition* onto a CUDA device, recording the target device.

    When *device* is omitted the current CUDA device is used.  The resolved
    device is stored on ``partition.device`` (so later forward passes can
    create tensors on the right device) before delegating the actual move
    to ``nn.Module.cuda``.
    """
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    """Mirror ``nn.Module.to`` while keeping ``partition.device`` in sync.

    A target device is extracted from, in increasing priority:
    the ``device`` keyword, the ``tensor`` keyword's device, or the first
    positional argument (either a device spec — ``torch.device``/int/str —
    or a tensor, whose device is used).  If one was found it is recorded on
    ``partition.device``; the call is then forwarded to ``nn.Module.to``
    unchanged.
    """
    target = None
    if 'device' in kwargs:
        target = kwargs['device']
    elif 'tensor' in kwargs:
        target = kwargs['tensor'].device
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            target = first
        if torch.is_tensor(first):
            target = first.device
    if target is not None:
        partition.device = torch.device(target)
    return nn.Module.to(partition, *args, **kwargs)
def create_pipeline_configuration(DEBUG=False, batch_size=8):
    """Build the 2-stage pipeline configuration for BertForQuestionAnswering.

    The static configuration describes the model's inputs/outputs and, per
    stage, the activation tensors crossing stage boundaries (shape, dtype,
    grad requirement, producer/consumer stage ids; ``-1`` denotes the model
    boundary itself).  Shapes are recorded for a reference batch of 8 and
    rewritten below so the batch dimension matches *batch_size*.

    Args:
        DEBUG: when True, place every stage on CPU instead of cuda:0/cuda:1.
        batch_size: batch size substituted into every batched shape.

    Returns:
        The configuration dict consumed by the pipeline runtime.
    """
    config = {
        'batch_dim': 0,
        'depth': 10000,
        'basic_blocks': (LayerNorm, Dropout, Softmax, Tanh, Linear, Embedding),
        'model_inputs': {
            'attention_mask': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
            'input_ids': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
            'token_type_ids': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
        },
        'model_outputs': {
            'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([8, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1},
        },
        'stages': {
            0: {
                'stage_cls': Partition0,
                'inputs': {
                    'attention_mask': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)},
                    'input_ids': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)},
                    'token_type_ids': {'shape': torch.Size([8, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)},
                },
                'outputs': {
                    'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([8, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([8, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1093': {'shape': torch.Size([8, 384, 16, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]},
                },
                'devices': [('cpu' if DEBUG else 'cuda:0')],
                'stage_depth': 1,
            },
            1: {
                'stage_cls': Partition1,
                'inputs': {
                    'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([8, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([8, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1093': {'shape': torch.Size([8, 384, 16, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0},
                },
                'outputs': {
                    'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([8, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]},
                },
                'devices': [('cpu' if DEBUG else 'cuda:1')],
                'stage_depth': 0,
            },
        },
    }
    # Rewrite the batch dimension of every batched shape (the duplicated
    # slice-and-splice logic is factored into _set_batch_size).
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        _set_batch_size(d, batch_dim, batch_size)
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            _set_batch_size(d, batch_dim, batch_size)
    return config


def _set_batch_size(d, batch_dim, batch_size):
    """Replace dimension *batch_dim* of d['shape'] with *batch_size* when the entry is batched."""
    if d['is_batched']:
        shape = d['shape']
        d['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[(batch_dim + 1):])
class Partition0(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, 
layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 
'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm', 'l_41': 'bert.encoder.3.attention.self.query', 'l_42': 'bert.encoder.3.attention.self.key', 'l_43': 'bert.encoder.3.attention.self.value', 'l_44': 'bert.encoder.3.attention.self.softmax', 'l_45': 'bert.encoder.3.attention.self.dropout', 'l_46': 'bert.encoder.3.attention.output.dense', 'l_47': 'bert.encoder.3.attention.output.dropout', 'l_48': 'bert.encoder.3.attention.output.LayerNorm', 'l_49': 'bert.encoder.3.intermediate.dense', 'l_50': 'bert.encoder.3.output.dense', 'l_51': 'bert.encoder.3.output.dropout', 'l_52': 'bert.encoder.3.output.LayerNorm', 'l_53': 'bert.encoder.4.attention.self.query', 'l_54': 'bert.encoder.4.attention.self.key', 'l_55': 'bert.encoder.4.attention.self.value', 'l_56': 'bert.encoder.4.attention.self.softmax', 'l_57': 'bert.encoder.4.attention.self.dropout', 'l_58': 'bert.encoder.4.attention.output.dense', 'l_59': 'bert.encoder.4.attention.output.dropout', 'l_60': 'bert.encoder.4.attention.output.LayerNorm', 'l_61': 'bert.encoder.4.intermediate.dense', 'l_62': 'bert.encoder.4.output.dense', 'l_63': 'bert.encoder.4.output.dropout', 'l_64': 'bert.encoder.4.output.LayerNorm', 'l_65': 'bert.encoder.5.attention.self.query', 'l_66': 'bert.encoder.5.attention.self.key', 'l_67': 'bert.encoder.5.attention.self.value', 'l_68': 'bert.encoder.5.attention.self.softmax', 'l_69': 'bert.encoder.5.attention.self.dropout', 'l_70': 'bert.encoder.5.attention.output.dense', 'l_71': 'bert.encoder.5.attention.output.dropout', 'l_72': 'bert.encoder.5.attention.output.LayerNorm', 'l_73': 'bert.encoder.5.intermediate.dense', 'l_74': 'bert.encoder.5.output.dense', 'l_75': 'bert.encoder.5.output.dropout', 'l_76': 'bert.encoder.5.output.LayerNorm', 'l_77': 'bert.encoder.6.attention.self.query', 
'l_78': 'bert.encoder.6.attention.self.key', 'l_79': 'bert.encoder.6.attention.self.value', 'l_80': 'bert.encoder.6.attention.self.softmax', 'l_81': 'bert.encoder.6.attention.self.dropout', 'l_82': 'bert.encoder.6.attention.output.dense', 'l_83': 'bert.encoder.6.attention.output.dropout', 'l_84': 'bert.encoder.6.attention.output.LayerNorm', 'l_85': 'bert.encoder.6.intermediate.dense', 'l_86': 'bert.encoder.6.output.dense', 'l_87': 'bert.encoder.6.output.dropout', 'l_88': 'bert.encoder.6.output.LayerNorm', 'l_89': 'bert.encoder.7.attention.self.query', 'l_90': 'bert.encoder.7.attention.self.key', 'l_91': 'bert.encoder.7.attention.self.value', 'l_92': 'bert.encoder.7.attention.self.softmax', 'l_93': 'bert.encoder.7.attention.self.dropout', 'l_94': 'bert.encoder.7.attention.output.dense', 'l_95': 'bert.encoder.7.attention.output.dropout', 'l_96': 'bert.encoder.7.attention.output.LayerNorm', 'l_97': 'bert.encoder.7.intermediate.dense', 'l_98': 'bert.encoder.7.output.dense', 'l_99': 'bert.encoder.7.output.dropout', 'l_100': 'bert.encoder.7.output.LayerNorm', 'l_101': 'bert.encoder.8.attention.self.query', 'l_102': 'bert.encoder.8.attention.self.key', 'l_103': 'bert.encoder.8.attention.self.value', 'l_104': 'bert.encoder.8.attention.self.softmax', 'l_105': 'bert.encoder.8.attention.self.dropout', 'l_106': 'bert.encoder.8.attention.output.dense', 'l_107': 'bert.encoder.8.attention.output.dropout', 'l_108': 'bert.encoder.8.attention.output.LayerNorm', 'l_109': 'bert.encoder.8.intermediate.dense', 'l_110': 'bert.encoder.8.output.dense', 'l_111': 'bert.encoder.8.output.dropout', 'l_112': 'bert.encoder.8.output.LayerNorm', 'l_113': 'bert.encoder.9.attention.self.query', 'l_114': 'bert.encoder.9.attention.self.key', 'l_115': 'bert.encoder.9.attention.self.value', 'l_116': 'bert.encoder.9.attention.self.softmax', 'l_117': 'bert.encoder.9.attention.self.dropout', 'l_118': 'bert.encoder.9.attention.output.dense', 'l_119': 'bert.encoder.9.attention.output.dropout', 'l_120': 
'bert.encoder.9.attention.output.LayerNorm', 'l_121': 'bert.encoder.9.intermediate.dense', 'l_122': 'bert.encoder.9.output.dense', 'l_123': 'bert.encoder.9.output.dropout', 'l_124': 'bert.encoder.9.output.LayerNorm', 'l_125': 'bert.encoder.10.attention.self.query', 'l_126': 'bert.encoder.10.attention.self.key', 'l_127': 'bert.encoder.10.attention.self.value', 'l_128': 'bert.encoder.10.attention.self.softmax', 'l_129': 'bert.encoder.10.attention.self.dropout'} self.to(self.device) def forward(self, *args): (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure) t_0 = self.l_0(input_ids) t_1 = self.l_2(token_type_ids) t_2 = attention_mask.unsqueeze(1) t_2 = t_2.unsqueeze(2) t_2 = t_2.to(dtype=torch.float32) t_2 = (1.0 - t_2) t_2 = (t_2 * (- 10000.0)) t_3 = input_ids.size(1) t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device) t_3 = t_3.unsqueeze(0) t_3 = t_3.expand_as(input_ids) t_3 = self.l_1(t_3) t_3 = (t_0 + t_3) t_1 = (t_3 + t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_3 = self.l_5(t_1) t_0 = self.l_6(t_1) t_4 = self.l_7(t_1) t_5 = t_3.size() t_6 = t_0.size() t_7 = t_4.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_9 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_8, t_9, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_10 = t_6[0] t_9 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_10, t_9, t_8, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_9 = t_7[1] t_10 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_8, t_9, t_10, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_5, t_6) t_5 = math.sqrt(64) t_5 = (t_6 / t_5) t_5 = (t_5 + t_2) t_5 = self.l_8(t_5) t_5 = self.l_9(t_5) t_7 = torch.matmul(t_5, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_5 = t_7.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_6 = t_5[0] 
t_10 = t_5[1] t_5 = t_5[2] t_5 = t_7.view(t_6, t_10, t_5) t_5 = self.l_10(t_5) t_5 = self.l_11(t_5) t_1 = (t_5 + t_1) t_1 = self.l_12(t_1) t_5 = self.l_13(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_14(t_5) t_5 = self.l_15(t_5) t_1 = (t_5 + t_1) t_1 = self.l_16(t_1) t_5 = self.l_17(t_1) t_10 = self.l_18(t_1) t_6 = self.l_19(t_1) t_7 = t_5.size() t_9 = t_10.size() t_8 = t_6.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_4 = t_7[0] t_0 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_4, t_0, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_3 = t_9[0] t_0 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_3, t_0, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_4 = t_8[0] t_0 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_6.view(t_4, t_0, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_7, t_9) t_7 = math.sqrt(64) t_7 = (t_9 / t_7) t_7 = (t_7 + t_2) t_7 = self.l_20(t_7) t_7 = self.l_21(t_7) t_8 = torch.matmul(t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_7 = t_8.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_9 = t_7[0] t_3 = t_7[1] t_7 = t_7[2] t_7 = t_8.view(t_9, t_3, t_7) t_7 = self.l_22(t_7) t_7 = self.l_23(t_7) t_1 = (t_7 + t_1) t_1 = self.l_24(t_1) t_7 = self.l_25(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_26(t_7) t_7 = self.l_27(t_7) t_1 = (t_7 + t_1) t_1 = self.l_28(t_1) t_7 = self.l_29(t_1) t_3 = self.l_30(t_1) t_9 = self.l_31(t_1) t_8 = t_7.size() t_0 = t_3.size() t_4 = t_9.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_6 = t_8[0] t_10 = t_8[1] t_5 = t_8[2] t_8 = t_8[3] t_8 = t_7.view(t_6, t_10, t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_10 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_3.view(t_5, t_10, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) 
t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_6 = t_4[0] t_10 = t_4[1] t_5 = t_4[2] t_4 = t_4[3] t_4 = t_9.view(t_6, t_10, t_5, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_0 = t_0.transpose((- 1), (- 2)) t_0 = torch.matmul(t_8, t_0) t_8 = math.sqrt(64) t_8 = (t_0 / t_8) t_8 = (t_8 + t_2) t_8 = self.l_32(t_8) t_8 = self.l_33(t_8) t_4 = torch.matmul(t_8, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_8 = t_4.size() t_8 = t_8[slice(None, (- 2), None)] t_8 = (t_8 + (1024,)) t_0 = t_8[0] t_5 = t_8[1] t_8 = t_8[2] t_8 = t_4.view(t_0, t_5, t_8) t_8 = self.l_34(t_8) t_8 = self.l_35(t_8) t_1 = (t_8 + t_1) t_1 = self.l_36(t_1) t_8 = self.l_37(t_1) t_8 = torch.nn.functional.gelu(t_8) t_8 = self.l_38(t_8) t_8 = self.l_39(t_8) t_1 = (t_8 + t_1) t_1 = self.l_40(t_1) t_8 = self.l_41(t_1) t_5 = self.l_42(t_1) t_0 = self.l_43(t_1) t_4 = t_8.size() t_10 = t_5.size() t_6 = t_0.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_9 = t_4[0] t_3 = t_4[1] t_7 = t_4[2] t_4 = t_4[3] t_4 = t_8.view(t_9, t_3, t_7, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_10 = t_10[slice(None, (- 1), None)] t_10 = (t_10 + (16, 64)) t_7 = t_10[0] t_3 = t_10[1] t_9 = t_10[2] t_10 = t_10[3] t_10 = t_5.view(t_7, t_3, t_9, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_9 = t_6[0] t_3 = t_6[1] t_7 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_9, t_3, t_7, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_10 = t_10.transpose((- 1), (- 2)) t_10 = torch.matmul(t_4, t_10) t_4 = math.sqrt(64) t_4 = (t_10 / t_4) t_4 = (t_4 + t_2) t_4 = self.l_44(t_4) t_4 = self.l_45(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_10 = t_4[0] t_7 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_10, t_7, t_4) t_4 = self.l_46(t_4) t_4 = self.l_47(t_4) t_1 = (t_4 + t_1) t_1 = self.l_48(t_1) t_4 = self.l_49(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_50(t_4) t_4 = 
self.l_51(t_4) t_1 = (t_4 + t_1) t_1 = self.l_52(t_1) t_4 = self.l_53(t_1) t_7 = self.l_54(t_1) t_10 = self.l_55(t_1) t_6 = t_4.size() t_3 = t_7.size() t_9 = t_10.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_5 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_0, t_5, t_8, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_8 = t_3[0] t_5 = t_3[1] t_0 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_8, t_5, t_0, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_0 = t_9[0] t_5 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_0, t_5, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_6, t_3) t_6 = math.sqrt(64) t_6 = (t_3 / t_6) t_6 = (t_6 + t_2) t_6 = self.l_56(t_6) t_6 = self.l_57(t_6) t_9 = torch.matmul(t_6, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_9 = t_9.contiguous() t_6 = t_9.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_3 = t_6[0] t_8 = t_6[1] t_6 = t_6[2] t_6 = t_9.view(t_3, t_8, t_6) t_6 = self.l_58(t_6) t_6 = self.l_59(t_6) t_1 = (t_6 + t_1) t_1 = self.l_60(t_1) t_6 = self.l_61(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_62(t_6) t_6 = self.l_63(t_6) t_1 = (t_6 + t_1) t_1 = self.l_64(t_1) t_6 = self.l_65(t_1) t_8 = self.l_66(t_1) t_3 = self.l_67(t_1) t_9 = t_6.size() t_5 = t_8.size() t_0 = t_3.size() t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_10 = t_9[0] t_7 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_6.view(t_10, t_7, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_4 = t_5[0] t_7 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_8.view(t_4, t_7, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_10 = t_0[0] t_7 = t_0[1] t_4 = t_0[2] t_0 = t_0[3] t_0 = t_3.view(t_10, t_7, t_4, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = 
torch.matmul(t_9, t_5) t_9 = math.sqrt(64) t_9 = (t_5 / t_9) t_9 = (t_9 + t_2) t_9 = self.l_68(t_9) t_9 = self.l_69(t_9) t_0 = torch.matmul(t_9, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_9 = t_0.size() t_9 = t_9[slice(None, (- 2), None)] t_9 = (t_9 + (1024,)) t_5 = t_9[0] t_4 = t_9[1] t_9 = t_9[2] t_9 = t_0.view(t_5, t_4, t_9) t_9 = self.l_70(t_9) t_9 = self.l_71(t_9) t_1 = (t_9 + t_1) t_1 = self.l_72(t_1) t_9 = self.l_73(t_1) t_9 = torch.nn.functional.gelu(t_9) t_9 = self.l_74(t_9) t_9 = self.l_75(t_9) t_1 = (t_9 + t_1) t_1 = self.l_76(t_1) t_9 = self.l_77(t_1) t_4 = self.l_78(t_1) t_5 = self.l_79(t_1) t_0 = t_9.size() t_7 = t_4.size() t_10 = t_5.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_3 = t_0[0] t_8 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_9.view(t_3, t_8, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_6 = t_7[0] t_8 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_6, t_8, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_10 = t_10[slice(None, (- 1), None)] t_10 = (t_10 + (16, 64)) t_3 = t_10[0] t_8 = t_10[1] t_6 = t_10[2] t_10 = t_10[3] t_10 = t_5.view(t_3, t_8, t_6, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_7 = t_7.transpose((- 1), (- 2)) t_7 = torch.matmul(t_0, t_7) t_0 = math.sqrt(64) t_0 = (t_7 / t_0) t_0 = (t_0 + t_2) t_0 = self.l_80(t_0) t_0 = self.l_81(t_0) t_10 = torch.matmul(t_0, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_10 = t_10.contiguous() t_0 = t_10.size() t_0 = t_0[slice(None, (- 2), None)] t_0 = (t_0 + (1024,)) t_7 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_10.view(t_7, t_6, t_0) t_0 = self.l_82(t_0) t_0 = self.l_83(t_0) t_1 = (t_0 + t_1) t_1 = self.l_84(t_1) t_0 = self.l_85(t_1) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_86(t_0) t_0 = self.l_87(t_0) t_1 = (t_0 + t_1) t_1 = self.l_88(t_1) t_0 = self.l_89(t_1) t_6 = self.l_90(t_1) t_7 = self.l_91(t_1) t_10 = t_0.size() t_8 = t_6.size() t_3 = t_7.size() t_10 = t_10[slice(None, (- 1), None)] t_10 = 
(t_10 + (16, 64)) t_5 = t_10[0] t_4 = t_10[1] t_9 = t_10[2] t_10 = t_10[3] t_10 = t_0.view(t_5, t_4, t_9, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_9 = t_8[0] t_4 = t_8[1] t_5 = t_8[2] t_8 = t_8[3] t_8 = t_6.view(t_9, t_4, t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_5 = t_3[0] t_4 = t_3[1] t_9 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_5, t_4, t_9, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_10, t_8) t_10 = math.sqrt(64) t_10 = (t_8 / t_10) t_10 = (t_10 + t_2) t_10 = self.l_92(t_10) t_10 = self.l_93(t_10) t_3 = torch.matmul(t_10, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_10 = t_3.size() t_10 = t_10[slice(None, (- 2), None)] t_10 = (t_10 + (1024,)) t_8 = t_10[0] t_9 = t_10[1] t_10 = t_10[2] t_10 = t_3.view(t_8, t_9, t_10) t_10 = self.l_94(t_10) t_10 = self.l_95(t_10) t_1 = (t_10 + t_1) t_1 = self.l_96(t_1) t_10 = self.l_97(t_1) t_10 = torch.nn.functional.gelu(t_10) t_10 = self.l_98(t_10) t_10 = self.l_99(t_10) t_1 = (t_10 + t_1) t_1 = self.l_100(t_1) t_10 = self.l_101(t_1) t_9 = self.l_102(t_1) t_8 = self.l_103(t_1) t_3 = t_10.size() t_4 = t_9.size() t_5 = t_8.size() t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_7 = t_3[0] t_6 = t_3[1] t_0 = t_3[2] t_3 = t_3[3] t_3 = t_10.view(t_7, t_6, t_0, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_0 = t_4[0] t_6 = t_4[1] t_7 = t_4[2] t_4 = t_4[3] t_4 = t_9.view(t_0, t_6, t_7, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_7 = t_5[0] t_6 = t_5[1] t_0 = t_5[2] t_5 = t_5[3] t_5 = t_8.view(t_7, t_6, t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_4 = t_4.transpose((- 1), (- 2)) t_4 = torch.matmul(t_3, t_4) t_3 = math.sqrt(64) t_3 = (t_4 / t_3) t_3 = (t_3 + t_2) t_3 = self.l_104(t_3) t_3 = self.l_105(t_3) t_5 = torch.matmul(t_3, t_5) t_5 = t_5.permute(0, 
2, 1, 3) t_5 = t_5.contiguous() t_3 = t_5.size() t_3 = t_3[slice(None, (- 2), None)] t_3 = (t_3 + (1024,)) t_4 = t_3[0] t_0 = t_3[1] t_3 = t_3[2] t_3 = t_5.view(t_4, t_0, t_3) t_3 = self.l_106(t_3) t_3 = self.l_107(t_3) t_1 = (t_3 + t_1) t_1 = self.l_108(t_1) t_3 = self.l_109(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_110(t_3) t_3 = self.l_111(t_3) t_1 = (t_3 + t_1) t_1 = self.l_112(t_1) t_3 = self.l_113(t_1) t_0 = self.l_114(t_1) t_4 = self.l_115(t_1) t_5 = t_3.size() t_6 = t_0.size() t_7 = t_4.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_9 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_8, t_9, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_10 = t_6[0] t_9 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_10, t_9, t_8, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_9 = t_7[1] t_10 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_8, t_9, t_10, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_5, t_6) t_5 = math.sqrt(64) t_5 = (t_6 / t_5) t_5 = (t_5 + t_2) t_5 = self.l_116(t_5) t_5 = self.l_117(t_5) t_7 = torch.matmul(t_5, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_5 = t_7.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_6 = t_5[0] t_10 = t_5[1] t_5 = t_5[2] t_5 = t_7.view(t_6, t_10, t_5) t_5 = self.l_118(t_5) t_5 = self.l_119(t_5) t_1 = (t_5 + t_1) t_1 = self.l_120(t_1) t_5 = self.l_121(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_122(t_5) t_5 = self.l_123(t_5) t_1 = (t_5 + t_1) t_1 = self.l_124(t_1) t_5 = self.l_125(t_1) t_10 = self.l_126(t_1) t_6 = self.l_127(t_1) t_7 = t_5.size() t_9 = t_10.size() t_8 = t_6.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_4 = t_7[0] t_0 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_4, t_0, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), 
None)] t_9 = (t_9 + (16, 64)) t_3 = t_9[0] t_0 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_3, t_0, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_4 = t_8[0] t_0 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_6.view(t_4, t_0, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_7, t_9) t_7 = math.sqrt(64) t_7 = (t_9 / t_7) t_7 = (t_7 + t_2) t_7 = self.l_128(t_7) t_7 = self.l_129(t_7) t_8 = torch.matmul(t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) return list(flatten((t_2, t_1, t_8))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]', 'BertForQuestionAnswering/Linear[qa_outputs]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.encoder.10.attention.output.dense', 'l_1': 'bert.encoder.10.attention.output.dropout', 'l_2': 'bert.encoder.10.attention.output.LayerNorm', 'l_3': 'bert.encoder.10.intermediate.dense', 'l_4': 'bert.encoder.10.output.dense', 'l_5': 'bert.encoder.10.output.dropout', 'l_6': 'bert.encoder.10.output.LayerNorm', 'l_7': 'bert.encoder.11.attention.self.query', 'l_8': 
'bert.encoder.11.attention.self.key', 'l_9': 'bert.encoder.11.attention.self.value', 'l_10': 'bert.encoder.11.attention.self.softmax', 'l_11': 'bert.encoder.11.attention.self.dropout', 'l_12': 'bert.encoder.11.attention.output.dense', 'l_13': 'bert.encoder.11.attention.output.dropout', 'l_14': 'bert.encoder.11.attention.output.LayerNorm', 'l_15': 'bert.encoder.11.intermediate.dense', 'l_16': 'bert.encoder.11.output.dense', 'l_17': 'bert.encoder.11.output.dropout', 'l_18': 'bert.encoder.11.output.LayerNorm', 'l_19': 'bert.encoder.12.attention.self.query', 'l_20': 'bert.encoder.12.attention.self.key', 'l_21': 'bert.encoder.12.attention.self.value', 'l_22': 'bert.encoder.12.attention.self.softmax', 'l_23': 'bert.encoder.12.attention.self.dropout', 'l_24': 'bert.encoder.12.attention.output.dense', 'l_25': 'bert.encoder.12.attention.output.dropout', 'l_26': 'bert.encoder.12.attention.output.LayerNorm', 'l_27': 'bert.encoder.12.intermediate.dense', 'l_28': 'bert.encoder.12.output.dense', 'l_29': 'bert.encoder.12.output.dropout', 'l_30': 'bert.encoder.12.output.LayerNorm', 'l_31': 'bert.encoder.13.attention.self.query', 'l_32': 'bert.encoder.13.attention.self.key', 'l_33': 'bert.encoder.13.attention.self.value', 'l_34': 'bert.encoder.13.attention.self.softmax', 'l_35': 'bert.encoder.13.attention.self.dropout', 'l_36': 'bert.encoder.13.attention.output.dense', 'l_37': 'bert.encoder.13.attention.output.dropout', 'l_38': 'bert.encoder.13.attention.output.LayerNorm', 'l_39': 'bert.encoder.13.intermediate.dense', 'l_40': 'bert.encoder.13.output.dense', 'l_41': 'bert.encoder.13.output.dropout', 'l_42': 'bert.encoder.13.output.LayerNorm', 'l_43': 'bert.encoder.14.attention.self.query', 'l_44': 'bert.encoder.14.attention.self.key', 'l_45': 'bert.encoder.14.attention.self.value', 'l_46': 'bert.encoder.14.attention.self.softmax', 'l_47': 'bert.encoder.14.attention.self.dropout', 'l_48': 'bert.encoder.14.attention.output.dense', 'l_49': 'bert.encoder.14.attention.output.dropout', 
'l_50': 'bert.encoder.14.attention.output.LayerNorm', 'l_51': 'bert.encoder.14.intermediate.dense', 'l_52': 'bert.encoder.14.output.dense', 'l_53': 'bert.encoder.14.output.dropout', 'l_54': 'bert.encoder.14.output.LayerNorm', 'l_55': 'bert.encoder.15.attention.self.query', 'l_56': 'bert.encoder.15.attention.self.key', 'l_57': 'bert.encoder.15.attention.self.value', 'l_58': 'bert.encoder.15.attention.self.softmax', 'l_59': 'bert.encoder.15.attention.self.dropout', 'l_60': 'bert.encoder.15.attention.output.dense', 'l_61': 'bert.encoder.15.attention.output.dropout', 'l_62': 'bert.encoder.15.attention.output.LayerNorm', 'l_63': 'bert.encoder.15.intermediate.dense', 'l_64': 'bert.encoder.15.output.dense', 'l_65': 'bert.encoder.15.output.dropout', 'l_66': 'bert.encoder.15.output.LayerNorm', 'l_67': 'bert.encoder.16.attention.self.query', 'l_68': 'bert.encoder.16.attention.self.key', 'l_69': 'bert.encoder.16.attention.self.value', 'l_70': 'bert.encoder.16.attention.self.softmax', 'l_71': 'bert.encoder.16.attention.self.dropout', 'l_72': 'bert.encoder.16.attention.output.dense', 'l_73': 'bert.encoder.16.attention.output.dropout', 'l_74': 'bert.encoder.16.attention.output.LayerNorm', 'l_75': 'bert.encoder.16.intermediate.dense', 'l_76': 'bert.encoder.16.output.dense', 'l_77': 'bert.encoder.16.output.dropout', 'l_78': 'bert.encoder.16.output.LayerNorm', 'l_79': 'bert.encoder.17.attention.self.query', 'l_80': 'bert.encoder.17.attention.self.key', 'l_81': 'bert.encoder.17.attention.self.value', 'l_82': 'bert.encoder.17.attention.self.softmax', 'l_83': 'bert.encoder.17.attention.self.dropout', 'l_84': 'bert.encoder.17.attention.output.dense', 'l_85': 'bert.encoder.17.attention.output.dropout', 'l_86': 'bert.encoder.17.attention.output.LayerNorm', 'l_87': 'bert.encoder.17.intermediate.dense', 'l_88': 'bert.encoder.17.output.dense', 'l_89': 'bert.encoder.17.output.dropout', 'l_90': 'bert.encoder.17.output.LayerNorm', 'l_91': 'bert.encoder.18.attention.self.query', 'l_92': 
'bert.encoder.18.attention.self.key', 'l_93': 'bert.encoder.18.attention.self.value', 'l_94': 'bert.encoder.18.attention.self.softmax', 'l_95': 'bert.encoder.18.attention.self.dropout', 'l_96': 'bert.encoder.18.attention.output.dense', 'l_97': 'bert.encoder.18.attention.output.dropout', 'l_98': 'bert.encoder.18.attention.output.LayerNorm', 'l_99': 'bert.encoder.18.intermediate.dense', 'l_100': 'bert.encoder.18.output.dense', 'l_101': 'bert.encoder.18.output.dropout', 'l_102': 'bert.encoder.18.output.LayerNorm', 'l_103': 'bert.encoder.19.attention.self.query', 'l_104': 'bert.encoder.19.attention.self.key', 'l_105': 'bert.encoder.19.attention.self.value', 'l_106': 'bert.encoder.19.attention.self.softmax', 'l_107': 'bert.encoder.19.attention.self.dropout', 'l_108': 'bert.encoder.19.attention.output.dense', 'l_109': 'bert.encoder.19.attention.output.dropout', 'l_110': 'bert.encoder.19.attention.output.LayerNorm', 'l_111': 'bert.encoder.19.intermediate.dense', 'l_112': 'bert.encoder.19.output.dense', 'l_113': 'bert.encoder.19.output.dropout', 'l_114': 'bert.encoder.19.output.LayerNorm', 'l_115': 'bert.encoder.20.attention.self.query', 'l_116': 'bert.encoder.20.attention.self.key', 'l_117': 'bert.encoder.20.attention.self.value', 'l_118': 'bert.encoder.20.attention.self.softmax', 'l_119': 'bert.encoder.20.attention.self.dropout', 'l_120': 'bert.encoder.20.attention.output.dense', 'l_121': 'bert.encoder.20.attention.output.dropout', 'l_122': 'bert.encoder.20.attention.output.LayerNorm', 'l_123': 'bert.encoder.20.intermediate.dense', 'l_124': 'bert.encoder.20.output.dense', 'l_125': 'bert.encoder.20.output.dropout', 'l_126': 'bert.encoder.20.output.LayerNorm', 'l_127': 'bert.encoder.21.attention.self.query', 'l_128': 'bert.encoder.21.attention.self.key', 'l_129': 'bert.encoder.21.attention.self.value', 'l_130': 'bert.encoder.21.attention.self.softmax', 'l_131': 'bert.encoder.21.attention.self.dropout', 'l_132': 'bert.encoder.21.attention.output.dense', 'l_133': 
'bert.encoder.21.attention.output.dropout', 'l_134': 'bert.encoder.21.attention.output.LayerNorm', 'l_135': 'bert.encoder.21.intermediate.dense', 'l_136': 'bert.encoder.21.output.dense', 'l_137': 'bert.encoder.21.output.dropout', 'l_138': 'bert.encoder.21.output.LayerNorm', 'l_139': 'bert.encoder.22.attention.self.query', 'l_140': 'bert.encoder.22.attention.self.key', 'l_141': 'bert.encoder.22.attention.self.value', 'l_142': 'bert.encoder.22.attention.self.softmax', 'l_143': 'bert.encoder.22.attention.self.dropout', 'l_144': 'bert.encoder.22.attention.output.dense', 'l_145': 'bert.encoder.22.attention.output.dropout', 'l_146': 'bert.encoder.22.attention.output.LayerNorm', 'l_147': 'bert.encoder.22.intermediate.dense', 'l_148': 'bert.encoder.22.output.dense', 'l_149': 'bert.encoder.22.output.dropout', 'l_150': 'bert.encoder.22.output.LayerNorm', 'l_151': 'bert.encoder.23.attention.self.query', 'l_152': 'bert.encoder.23.attention.self.key', 'l_153': 'bert.encoder.23.attention.self.value', 'l_154': 'bert.encoder.23.attention.self.softmax', 'l_155': 'bert.encoder.23.attention.self.dropout', 'l_156': 'bert.encoder.23.attention.output.dense', 'l_157': 'bert.encoder.23.attention.output.dropout', 'l_158': 'bert.encoder.23.attention.output.LayerNorm', 'l_159': 'bert.encoder.23.intermediate.dense', 'l_160': 'bert.encoder.23.output.dense', 'l_161': 'bert.encoder.23.output.dropout', 'l_162': 'bert.encoder.23.output.LayerNorm', 'l_163': 'bert.pooler.dense', 'l_164': 'bert.pooler.activation', 'l_165': 'qa_outputs'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = x2.contiguous() t_1 = t_0.size() t_1 = t_1[slice(None, (- 2), None)] t_1 = (t_1 + (1024,)) t_2 = t_1[0] t_3 = t_1[1] t_1 = t_1[2] t_1 = t_0.view(t_2, t_3, t_1) t_1 = self.l_0(t_1) t_1 = self.l_1(t_1) t_1 = (t_1 + x1) t_1 = self.l_2(t_1) t_3 = self.l_3(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_4(t_3) t_3 = self.l_5(t_3) t_1 = (t_3 + t_1) t_1 = 
self.l_6(t_1) t_3 = self.l_7(t_1) t_2 = self.l_8(t_1) t_0 = self.l_9(t_1) t_4 = t_3.size() t_5 = t_2.size() t_6 = t_0.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_3.view(t_7, t_8, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + x0) t_4 = self.l_10(t_4) t_4 = self.l_11(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_12(t_4) t_4 = self.l_13(t_4) t_1 = (t_4 + t_1) t_1 = self.l_14(t_1) t_4 = self.l_15(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_16(t_4) t_4 = self.l_17(t_4) t_1 = (t_4 + t_1) t_1 = self.l_18(t_1) t_4 = self.l_19(t_1) t_9 = self.l_20(t_1) t_5 = self.l_21(t_1) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_2 = t_6[1] t_3 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_0, t_2, t_3, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_3 = t_8[0] t_2 = t_8[1] t_0 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_3, t_2, t_0, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_0 = t_7[0] t_2 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_0, t_2, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / 
t_6) t_6 = (t_6 + x0) t_6 = self.l_22(t_6) t_6 = self.l_23(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_3 = t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_3, t_6) t_6 = self.l_24(t_6) t_6 = self.l_25(t_6) t_1 = (t_6 + t_1) t_1 = self.l_26(t_1) t_6 = self.l_27(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_28(t_6) t_6 = self.l_29(t_6) t_1 = (t_6 + t_1) t_1 = self.l_30(t_1) t_6 = self.l_31(t_1) t_3 = self.l_32(t_1) t_8 = self.l_33(t_1) t_7 = t_6.size() t_2 = t_3.size() t_0 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_4 = t_2[0] t_9 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_3.view(t_4, t_9, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_9 = t_0[1] t_4 = t_0[2] t_0 = t_0[3] t_0 = t_8.view(t_5, t_9, t_4, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) t_2 = torch.matmul(t_7, t_2) t_7 = math.sqrt(64) t_7 = (t_2 / t_7) t_7 = (t_7 + x0) t_7 = self.l_34(t_7) t_7 = self.l_35(t_7) t_0 = torch.matmul(t_7, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_7 = t_0.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_2 = t_7[0] t_4 = t_7[1] t_7 = t_7[2] t_7 = t_0.view(t_2, t_4, t_7) t_7 = self.l_36(t_7) t_7 = self.l_37(t_7) t_1 = (t_7 + t_1) t_1 = self.l_38(t_1) t_7 = self.l_39(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_40(t_7) t_7 = self.l_41(t_7) t_1 = (t_7 + t_1) t_1 = self.l_42(t_1) t_7 = self.l_43(t_1) t_4 = self.l_44(t_1) t_2 = self.l_45(t_1) t_0 = t_7.size() t_9 = t_4.size() t_5 = t_2.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_8 = t_0[0] t_3 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = 
t_7.view(t_8, t_3, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_6 = t_9[0] t_3 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_6, t_3, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_3 = t_5[1] t_6 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_8, t_3, t_6, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_0, t_9) t_0 = math.sqrt(64) t_0 = (t_9 / t_0) t_0 = (t_0 + x0) t_0 = self.l_46(t_0) t_0 = self.l_47(t_0) t_5 = torch.matmul(t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_0 = t_5.size() t_0 = t_0[slice(None, (- 2), None)] t_0 = (t_0 + (1024,)) t_9 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_5.view(t_9, t_6, t_0) t_0 = self.l_48(t_0) t_0 = self.l_49(t_0) t_1 = (t_0 + t_1) t_1 = self.l_50(t_1) t_0 = self.l_51(t_1) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_52(t_0) t_0 = self.l_53(t_0) t_1 = (t_0 + t_1) t_1 = self.l_54(t_1) t_0 = self.l_55(t_1) t_6 = self.l_56(t_1) t_9 = self.l_57(t_1) t_5 = t_0.size() t_3 = t_6.size() t_8 = t_9.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_2 = t_5[0] t_4 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_0.view(t_2, t_4, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_7 = t_3[0] t_4 = t_3[1] t_2 = t_3[2] t_3 = t_3[3] t_3 = t_6.view(t_7, t_4, t_2, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_2 = t_8[0] t_4 = t_8[1] t_7 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_2, t_4, t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_5, t_3) t_5 = math.sqrt(64) t_5 = (t_3 / t_5) t_5 = (t_5 + x0) t_5 = self.l_58(t_5) t_5 = self.l_59(t_5) t_8 = torch.matmul(t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_5 = t_8.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_3 = t_5[0] t_7 = 
t_5[1] t_5 = t_5[2] t_5 = t_8.view(t_3, t_7, t_5) t_5 = self.l_60(t_5) t_5 = self.l_61(t_5) t_1 = (t_5 + t_1) t_1 = self.l_62(t_1) t_5 = self.l_63(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_64(t_5) t_5 = self.l_65(t_5) t_1 = (t_5 + t_1) t_1 = self.l_66(t_1) t_5 = self.l_67(t_1) t_7 = self.l_68(t_1) t_3 = self.l_69(t_1) t_8 = t_5.size() t_4 = t_7.size() t_2 = t_3.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_9 = t_8[0] t_6 = t_8[1] t_0 = t_8[2] t_8 = t_8[3] t_8 = t_5.view(t_9, t_6, t_0, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_0 = t_4[0] t_6 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_7.view(t_0, t_6, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_9 = t_2[0] t_6 = t_2[1] t_0 = t_2[2] t_2 = t_2[3] t_2 = t_3.view(t_9, t_6, t_0, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_4 = t_4.transpose((- 1), (- 2)) t_4 = torch.matmul(t_8, t_4) t_8 = math.sqrt(64) t_8 = (t_4 / t_8) t_8 = (t_8 + x0) t_8 = self.l_70(t_8) t_8 = self.l_71(t_8) t_2 = torch.matmul(t_8, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_2 = t_2.contiguous() t_8 = t_2.size() t_8 = t_8[slice(None, (- 2), None)] t_8 = (t_8 + (1024,)) t_4 = t_8[0] t_0 = t_8[1] t_8 = t_8[2] t_8 = t_2.view(t_4, t_0, t_8) t_8 = self.l_72(t_8) t_8 = self.l_73(t_8) t_1 = (t_8 + t_1) t_1 = self.l_74(t_1) t_8 = self.l_75(t_1) t_8 = torch.nn.functional.gelu(t_8) t_8 = self.l_76(t_8) t_8 = self.l_77(t_8) t_1 = (t_8 + t_1) t_1 = self.l_78(t_1) t_8 = self.l_79(t_1) t_0 = self.l_80(t_1) t_4 = self.l_81(t_1) t_2 = t_8.size() t_6 = t_0.size() t_9 = t_4.size() t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_3 = t_2[0] t_7 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_8.view(t_3, t_7, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_5 = t_6[0] t_7 = t_6[1] t_3 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_5, t_7, t_3, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_9 = 
t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_3 = t_9[0] t_7 = t_9[1] t_5 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_3, t_7, t_5, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_2, t_6) t_2 = math.sqrt(64) t_2 = (t_6 / t_2) t_2 = (t_2 + x0) t_2 = self.l_82(t_2) t_2 = self.l_83(t_2) t_9 = torch.matmul(t_2, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_9 = t_9.contiguous() t_2 = t_9.size() t_2 = t_2[slice(None, (- 2), None)] t_2 = (t_2 + (1024,)) t_6 = t_2[0] t_5 = t_2[1] t_2 = t_2[2] t_2 = t_9.view(t_6, t_5, t_2) t_2 = self.l_84(t_2) t_2 = self.l_85(t_2) t_1 = (t_2 + t_1) t_1 = self.l_86(t_1) t_2 = self.l_87(t_1) t_2 = torch.nn.functional.gelu(t_2) t_2 = self.l_88(t_2) t_2 = self.l_89(t_2) t_1 = (t_2 + t_1) t_1 = self.l_90(t_1) t_2 = self.l_91(t_1) t_5 = self.l_92(t_1) t_6 = self.l_93(t_1) t_9 = t_2.size() t_7 = t_5.size() t_3 = t_6.size() t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_4 = t_9[0] t_0 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_2.view(t_4, t_0, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_0 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_8, t_0, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_4 = t_3[0] t_0 = t_3[1] t_8 = t_3[2] t_3 = t_3[3] t_3 = t_6.view(t_4, t_0, t_8, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_7 = t_7.transpose((- 1), (- 2)) t_7 = torch.matmul(t_9, t_7) t_9 = math.sqrt(64) t_9 = (t_7 / t_9) t_9 = (t_9 + x0) t_9 = self.l_94(t_9) t_9 = self.l_95(t_9) t_3 = torch.matmul(t_9, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_9 = t_3.size() t_9 = t_9[slice(None, (- 2), None)] t_9 = (t_9 + (1024,)) t_7 = t_9[0] t_8 = t_9[1] t_9 = t_9[2] t_9 = t_3.view(t_7, t_8, t_9) t_9 = self.l_96(t_9) t_9 = self.l_97(t_9) t_1 = (t_9 + t_1) t_1 = self.l_98(t_1) t_9 = self.l_99(t_1) t_9 = torch.nn.functional.gelu(t_9) t_9 = self.l_100(t_9) t_9 = self.l_101(t_9) t_1 = (t_9 + 
t_1) t_1 = self.l_102(t_1) t_9 = self.l_103(t_1) t_8 = self.l_104(t_1) t_7 = self.l_105(t_1) t_3 = t_9.size() t_0 = t_8.size() t_4 = t_7.size() t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_6 = t_3[0] t_5 = t_3[1] t_2 = t_3[2] t_3 = t_3[3] t_3 = t_9.view(t_6, t_5, t_2, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_2 = t_0[0] t_5 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_8.view(t_2, t_5, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_6 = t_4[0] t_5 = t_4[1] t_2 = t_4[2] t_4 = t_4[3] t_4 = t_7.view(t_6, t_5, t_2, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_0 = t_0.transpose((- 1), (- 2)) t_0 = torch.matmul(t_3, t_0) t_3 = math.sqrt(64) t_3 = (t_0 / t_3) t_3 = (t_3 + x0) t_3 = self.l_106(t_3) t_3 = self.l_107(t_3) t_4 = torch.matmul(t_3, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_3 = t_4.size() t_3 = t_3[slice(None, (- 2), None)] t_3 = (t_3 + (1024,)) t_0 = t_3[0] t_2 = t_3[1] t_3 = t_3[2] t_3 = t_4.view(t_0, t_2, t_3) t_3 = self.l_108(t_3) t_3 = self.l_109(t_3) t_1 = (t_3 + t_1) t_1 = self.l_110(t_1) t_3 = self.l_111(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_112(t_3) t_3 = self.l_113(t_3) t_1 = (t_3 + t_1) t_1 = self.l_114(t_1) t_3 = self.l_115(t_1) t_2 = self.l_116(t_1) t_0 = self.l_117(t_1) t_4 = t_3.size() t_5 = t_2.size() t_6 = t_0.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_3.view(t_7, t_8, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = 
math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + x0) t_4 = self.l_118(t_4) t_4 = self.l_119(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_120(t_4) t_4 = self.l_121(t_4) t_1 = (t_4 + t_1) t_1 = self.l_122(t_1) t_4 = self.l_123(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_124(t_4) t_4 = self.l_125(t_4) t_1 = (t_4 + t_1) t_1 = self.l_126(t_1) t_4 = self.l_127(t_1) t_9 = self.l_128(t_1) t_5 = self.l_129(t_1) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_2 = t_6[1] t_3 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_0, t_2, t_3, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_3 = t_8[0] t_2 = t_8[1] t_0 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_3, t_2, t_0, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_0 = t_7[0] t_2 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_0, t_2, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / t_6) t_6 = (t_6 + x0) t_6 = self.l_130(t_6) t_6 = self.l_131(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_3 = t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_3, t_6) t_6 = self.l_132(t_6) t_6 = self.l_133(t_6) t_1 = (t_6 + t_1) t_1 = self.l_134(t_1) t_6 = self.l_135(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_136(t_6) t_6 = self.l_137(t_6) t_1 = (t_6 + t_1) t_1 = self.l_138(t_1) t_6 = self.l_139(t_1) t_3 = self.l_140(t_1) t_8 = self.l_141(t_1) t_7 = t_6.size() t_2 = t_3.size() t_0 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = 
t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_4 = t_2[0] t_9 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_3.view(t_4, t_9, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_9 = t_0[1] t_4 = t_0[2] t_0 = t_0[3] t_0 = t_8.view(t_5, t_9, t_4, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) t_2 = torch.matmul(t_7, t_2) t_7 = math.sqrt(64) t_7 = (t_2 / t_7) t_7 = (t_7 + x0) t_7 = self.l_142(t_7) t_7 = self.l_143(t_7) t_0 = torch.matmul(t_7, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_7 = t_0.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_2 = t_7[0] t_4 = t_7[1] t_7 = t_7[2] t_7 = t_0.view(t_2, t_4, t_7) t_7 = self.l_144(t_7) t_7 = self.l_145(t_7) t_1 = (t_7 + t_1) t_1 = self.l_146(t_1) t_7 = self.l_147(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_148(t_7) t_7 = self.l_149(t_7) t_1 = (t_7 + t_1) t_1 = self.l_150(t_1) t_7 = self.l_151(t_1) t_4 = self.l_152(t_1) t_2 = self.l_153(t_1) t_0 = t_7.size() t_9 = t_4.size() t_5 = t_2.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_8 = t_0[0] t_3 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_7.view(t_8, t_3, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_6 = t_9[0] t_3 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_6, t_3, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_3 = t_5[1] t_6 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_8, t_3, t_6, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_0, t_9) t_0 = math.sqrt(64) t_0 = (t_9 / t_0) t_0 = (t_0 + x0) t_0 = self.l_154(t_0) t_0 = self.l_155(t_0) t_5 = torch.matmul(t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_0 = t_5.size() t_0 = t_0[slice(None, (- 2), 
None)] t_0 = (t_0 + (1024,)) t_9 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_5.view(t_9, t_6, t_0) t_0 = self.l_156(t_0) t_0 = self.l_157(t_0) t_1 = (t_0 + t_1) t_1 = self.l_158(t_1) t_0 = self.l_159(t_1) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_160(t_0) t_0 = self.l_161(t_0) t_1 = (t_0 + t_1) t_1 = self.l_162(t_1) t_0 = self.l_165(t_1) t_1 = t_1[(slice(None, None, None), 0)] t_1 = self.l_163(t_1) t_1 = self.l_164(t_1) return (t_0,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Walk the module tree, yielding ``(layer, layer_scope, parent)`` triples.

    A child is treated as a leaf (and yielded) when it has no children of its
    own, is an instance of one of ``basic_blocks``, or ``depth`` has run out;
    otherwise traversal recurses into it.

    Parameters
    ----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go
    basic_blocks:
        module types that are never broken down further
    full:
        when True, also yield non-leaf containers, and append a boolean
        "is terminal" flag to every yielded tuple
    """
    root = type(module).__name__ if prefix is None else prefix
    for child_name, child in module.named_children():
        child_scope = f'{root}/{type(child).__name__}[{child_name}]'
        is_terminal = (not list(child.children())
                       or isinstance(child, tuple(basic_blocks))
                       or depth == 0)
        if is_terminal:
            if full:
                yield (child, child_scope, module, True)
            else:
                yield (child, child_scope, module)
        else:
            if full:
                # Containers are reported too, flagged as non-terminal.
                yield (child, child_scope, module, False)
            yield from traverse_model(child, depth - 1, child_scope, basic_blocks, full)
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map each profiled layer's scope name to the layer itself."""
    mapping = dict()
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Iterate over the model's parameters and buffers, yielding ``(obj, obj_scope)``.

    Parameters and buffers that belong directly to ``module`` are yielded
    first, then each child is visited recursively with an extended scope.

    Parameters
    ----------
    module:
        the model to iterate over
    """
    scope = type(module).__name__ if prefix is None else prefix
    for name, param in module.named_parameters(recurse=False):
        yield (param, f'{scope}/{type(param).__name__}[{name}]')
    for name, buf in module.named_buffers(recurse=False):
        yield (buf, f'{scope}/{type(buf).__name__}[{name}]')
    for child_name, child in module.named_children():
        child_scope = f'{scope}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_scope)
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Collect all parameters/buffers of *model* as an ordered scope->tensor map."""
    result = collections.OrderedDict()
    for tensor, scope in traverse_params_buffs(model):
        result[scope] = tensor
    return result
def move_tensors(ts, device):
    """Recursively move every tensor/module inside the nested structure *ts* to *device*."""
    def _relocate(item):
        # Non-tensor leaves (ints, strings, None, ...) pass through untouched.
        return item.to(device) if isinstance(item, (nn.Module, Tensor)) else item
    return nested_map(_relocate, ts)
def nested_map(func, ts, full=False):
    """Apply *func* to every leaf of the nested structure *ts*, preserving shape.

    ``torch.Size`` values are treated as atomic leaves (not recursed into,
    even though they are tuples). Slices are only descended into when
    ``full`` is True.
    """
    if isinstance(ts, torch.Size):
        return func(ts)
    if isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, item, full=full) for item in ts)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for key, value in ts.items()}
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
def flatten(ts):
    """Yield the leaves of the nested structure *ts* in deterministic order.

    Dict entries are visited in sorted-key order so that flatten/unflatten
    round-trip consistently; ``torch.Size`` is yielded whole as a leaf.
    """
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=(lambda kv: kv[0])):
            yield from flatten(value)
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    else:
        yield ts
def unflatten(xs, structure):
    """Rebuild a nested structure shaped like *structure* from the flat sequence *xs*."""
    value, _consumed = _unflatten(xs, structure)
    return value
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs):
    """Return the partition's state dict re-keyed with original model names.

    Keys are translated via ``partition.lookup``; a key not found directly
    must be of the form ``'<local>.<suffix>'`` where ``<local>`` is in the
    lookup table.
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
        else:
            assert '.' in key
            head, sep, tail = key.partition('.')
            translated[lookup[head] + sep + tail] = value
    return translated
def load_state_dict(partition, state_dict, strict=True):
    """Load *state_dict* (keyed by original model names) into the partition.

    Original names are mapped back to the partition's local names through
    the inverse of ``partition.lookup``; every tensor is moved to
    ``partition.device`` before loading. Keys that match neither directly
    nor via their ``'<head>.<suffix>'`` head are silently skipped.
    """
    reverse_lookup = {original: local for (local, original) in partition.lookup.items()}
    device = partition.device
    new_state = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in reverse_lookup:
            new_state[reverse_lookup[key]] = state_dict[key].to(device)
            continue
        dot = key.rfind('.')
        head, suffix = key[:dot], key[dot:]
        if head in reverse_lookup:
            new_state[reverse_lookup[head] + suffix] = state_dict[key].to(device)
    nn.Module.load_state_dict(partition, new_state, strict=strict)
def named_buffers(partition, prefix='', recurse=True):
    """Yield ``(original_name, buffer)`` pairs, translating local buffer names
    back to original model names via ``partition.lookup``."""
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], buf)
        else:
            assert '.' in key
            head, sep, tail = key.partition('.')
            yield (lookup[head] + sep + tail, buf)
def named_parameters(partition, prefix='', recurse=True):
    """Yield ``(original_name, parameter)`` pairs, translating local parameter
    names back to original model names via ``partition.lookup``."""
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], param)
        else:
            assert '.' in key
            head, sep, tail = key.partition('.')
            yield (lookup[head] + sep + tail, param)
def cpu(partition):
    """Move the partition to the CPU, keeping ``partition.device`` in sync."""
    cpu_device = torch.device('cpu')
    partition.device = cpu_device
    return nn.Module.cpu(partition)
def cuda(partition, device=None):
    """Move the partition to a CUDA device (the current one when *device* is
    None), keeping ``partition.device`` in sync."""
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    """Forward to ``nn.Module.to`` while tracking ``partition.device``.

    The target device is extracted from the ``device``/``tensor`` keyword
    arguments or from the first positional argument (a device spec or a
    tensor); positional arguments take precedence. When no device can be
    determined, ``partition.device`` is left unchanged.
    """
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            device = first
        if torch.is_tensor(first):
            device = first.device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
def bert_large_uncased_whole_word_maskings_384_2p_bw12_async_pipedream():
    """Return the static experiment configuration for this generated pipeline."""
    return {
        'model_type': 'bert_squad',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'precompute_attention_mask': False, 'return_dict': False},
        'do_resize_token_embedding': False,
    }
def create_pipeline_configuration(DEBUG=False, batch_size=1):
    """Build the 2-stage pipeline configuration for BERT-large QA (seq-len 384).

    Shapes in the literal below are recorded for batch size 1; before
    returning, the batch dimension of every batched tensor descriptor is
    rewritten to ``batch_size``.

    Args:
        DEBUG: when True, place every stage on the CPU instead of its GPU.
        batch_size: batch size written into all batched tensor descriptors.

    Returns:
        dict with global settings ('batch_dim', 'depth', 'basic_blocks'),
        model-level input/output descriptors, and per-stage partition class,
        tensor descriptors, devices and stage depth.
    """
    config = {
        'batch_dim': 0,
        'depth': 10000,
        'basic_blocks': (Dropout, Softmax, Tanh, Embedding, Linear, LayerNorm),
        'model_inputs': {
            'attention_mask': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
            'input_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
            'token_type_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]},
        },
        'model_outputs': {
            'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([1, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 1},
        },
        'stages': {
            0: {
                'stage_cls': Partition0,
                'inputs': {
                    'attention_mask': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': -1},
                    'input_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': -1},
                    'token_type_ids': {'shape': torch.Size([1, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': -1},
                },
                'outputs': {
                    'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([1, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([1, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/torch.nn.functional::gelu_1211': {'shape': torch.Size([1, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]},
                },
                'devices': ['cpu' if DEBUG else 'cuda:0'],
                'stage_depth': 1,
            },
            1: {
                'stage_cls': Partition1,
                'inputs': {
                    'BertForQuestionAnswering/BertModel[bert]/Tensor::__mul___12': {'shape': torch.Size([1, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([1, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0},
                    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/torch.nn.functional::gelu_1211': {'shape': torch.Size([1, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0},
                },
                'outputs': {
                    'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([1, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [-1]},
                },
                'devices': ['cpu' if DEBUG else 'cuda:1'],
                'stage_depth': 0,
            },
        },
    }
    batch_dim = config['batch_dim']

    def _resize_to_batch(descriptor):
        # Rewrite the batch dimension of a batched tensor descriptor in place
        # (previously this logic was duplicated for model- and stage-level dicts).
        if descriptor['is_batched']:
            shape = descriptor['shape']
            descriptor['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])

    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        _resize_to_batch(d)
    for stage in config['stages'].values():
        for d in chain(stage['inputs'].values(), stage['outputs'].values()):
            _resize_to_batch(d)
    return config
class Partition0(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 
'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm', 'l_41': 'bert.encoder.3.attention.self.query', 'l_42': 'bert.encoder.3.attention.self.key', 'l_43': 'bert.encoder.3.attention.self.value', 'l_44': 'bert.encoder.3.attention.self.softmax', 'l_45': 'bert.encoder.3.attention.self.dropout', 'l_46': 'bert.encoder.3.attention.output.dense', 'l_47': 'bert.encoder.3.attention.output.dropout', 'l_48': 'bert.encoder.3.attention.output.LayerNorm', 'l_49': 'bert.encoder.3.intermediate.dense', 'l_50': 'bert.encoder.3.output.dense', 'l_51': 'bert.encoder.3.output.dropout', 'l_52': 'bert.encoder.3.output.LayerNorm', 'l_53': 'bert.encoder.4.attention.self.query', 'l_54': 'bert.encoder.4.attention.self.key', 'l_55': 'bert.encoder.4.attention.self.value', 'l_56': 'bert.encoder.4.attention.self.softmax', 'l_57': 'bert.encoder.4.attention.self.dropout', 'l_58': 'bert.encoder.4.attention.output.dense', 'l_59': 'bert.encoder.4.attention.output.dropout', 'l_60': 'bert.encoder.4.attention.output.LayerNorm', 'l_61': 'bert.encoder.4.intermediate.dense', 'l_62': 'bert.encoder.4.output.dense', 'l_63': 'bert.encoder.4.output.dropout', 'l_64': 'bert.encoder.4.output.LayerNorm', 'l_65': 'bert.encoder.5.attention.self.query', 'l_66': 'bert.encoder.5.attention.self.key', 'l_67': 'bert.encoder.5.attention.self.value', 'l_68': 'bert.encoder.5.attention.self.softmax', 'l_69': 'bert.encoder.5.attention.self.dropout', 'l_70': 
'bert.encoder.5.attention.output.dense', 'l_71': 'bert.encoder.5.attention.output.dropout', 'l_72': 'bert.encoder.5.attention.output.LayerNorm', 'l_73': 'bert.encoder.5.intermediate.dense', 'l_74': 'bert.encoder.5.output.dense', 'l_75': 'bert.encoder.5.output.dropout', 'l_76': 'bert.encoder.5.output.LayerNorm', 'l_77': 'bert.encoder.6.attention.self.query', 'l_78': 'bert.encoder.6.attention.self.key', 'l_79': 'bert.encoder.6.attention.self.value', 'l_80': 'bert.encoder.6.attention.self.softmax', 'l_81': 'bert.encoder.6.attention.self.dropout', 'l_82': 'bert.encoder.6.attention.output.dense', 'l_83': 'bert.encoder.6.attention.output.dropout', 'l_84': 'bert.encoder.6.attention.output.LayerNorm', 'l_85': 'bert.encoder.6.intermediate.dense', 'l_86': 'bert.encoder.6.output.dense', 'l_87': 'bert.encoder.6.output.dropout', 'l_88': 'bert.encoder.6.output.LayerNorm', 'l_89': 'bert.encoder.7.attention.self.query', 'l_90': 'bert.encoder.7.attention.self.key', 'l_91': 'bert.encoder.7.attention.self.value', 'l_92': 'bert.encoder.7.attention.self.softmax', 'l_93': 'bert.encoder.7.attention.self.dropout', 'l_94': 'bert.encoder.7.attention.output.dense', 'l_95': 'bert.encoder.7.attention.output.dropout', 'l_96': 'bert.encoder.7.attention.output.LayerNorm', 'l_97': 'bert.encoder.7.intermediate.dense', 'l_98': 'bert.encoder.7.output.dense', 'l_99': 'bert.encoder.7.output.dropout', 'l_100': 'bert.encoder.7.output.LayerNorm', 'l_101': 'bert.encoder.8.attention.self.query', 'l_102': 'bert.encoder.8.attention.self.key', 'l_103': 'bert.encoder.8.attention.self.value', 'l_104': 'bert.encoder.8.attention.self.softmax', 'l_105': 'bert.encoder.8.attention.self.dropout', 'l_106': 'bert.encoder.8.attention.output.dense', 'l_107': 'bert.encoder.8.attention.output.dropout', 'l_108': 'bert.encoder.8.attention.output.LayerNorm', 'l_109': 'bert.encoder.8.intermediate.dense', 'l_110': 'bert.encoder.8.output.dense', 'l_111': 'bert.encoder.8.output.dropout', 'l_112': 'bert.encoder.8.output.LayerNorm', 
'l_113': 'bert.encoder.9.attention.self.query', 'l_114': 'bert.encoder.9.attention.self.key', 'l_115': 'bert.encoder.9.attention.self.value', 'l_116': 'bert.encoder.9.attention.self.softmax', 'l_117': 'bert.encoder.9.attention.self.dropout', 'l_118': 'bert.encoder.9.attention.output.dense', 'l_119': 'bert.encoder.9.attention.output.dropout', 'l_120': 'bert.encoder.9.attention.output.LayerNorm', 'l_121': 'bert.encoder.9.intermediate.dense', 'l_122': 'bert.encoder.9.output.dense', 'l_123': 'bert.encoder.9.output.dropout', 'l_124': 'bert.encoder.9.output.LayerNorm', 'l_125': 'bert.encoder.10.attention.self.query', 'l_126': 'bert.encoder.10.attention.self.key', 'l_127': 'bert.encoder.10.attention.self.value', 'l_128': 'bert.encoder.10.attention.self.softmax', 'l_129': 'bert.encoder.10.attention.self.dropout', 'l_130': 'bert.encoder.10.attention.output.dense', 'l_131': 'bert.encoder.10.attention.output.dropout', 'l_132': 'bert.encoder.10.attention.output.LayerNorm', 'l_133': 'bert.encoder.10.intermediate.dense', 'l_134': 'bert.encoder.10.output.dense', 'l_135': 'bert.encoder.10.output.dropout', 'l_136': 'bert.encoder.10.output.LayerNorm', 'l_137': 'bert.encoder.11.attention.self.query', 'l_138': 'bert.encoder.11.attention.self.key', 'l_139': 'bert.encoder.11.attention.self.value', 'l_140': 'bert.encoder.11.attention.self.softmax', 'l_141': 'bert.encoder.11.attention.self.dropout', 'l_142': 'bert.encoder.11.attention.output.dense', 'l_143': 'bert.encoder.11.attention.output.dropout', 'l_144': 'bert.encoder.11.attention.output.LayerNorm', 'l_145': 'bert.encoder.11.intermediate.dense'} self.to(self.device) def forward(self, *args): (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure) t_0 = self.l_0(input_ids) t_1 = self.l_2(token_type_ids) t_2 = attention_mask.unsqueeze(1) t_2 = t_2.unsqueeze(2) t_2 = t_2.to(dtype=torch.float32) t_2 = (1.0 - t_2) t_2 = (t_2 * (- 10000.0)) t_3 = input_ids.size(1) t_3 = torch.arange(t_3, dtype=torch.int64, 
device=self.device) t_3 = t_3.unsqueeze(0) t_3 = t_3.expand_as(input_ids) t_3 = self.l_1(t_3) t_3 = (t_0 + t_3) t_1 = (t_3 + t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_3 = self.l_5(t_1) t_0 = self.l_6(t_1) t_4 = self.l_7(t_1) t_5 = t_3.size() t_6 = t_0.size() t_7 = t_4.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_9 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_8, t_9, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_10 = t_6[0] t_9 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_10, t_9, t_8, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_9 = t_7[1] t_10 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_8, t_9, t_10, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_5, t_6) t_5 = math.sqrt(64) t_5 = (t_6 / t_5) t_5 = (t_5 + t_2) t_5 = self.l_8(t_5) t_5 = self.l_9(t_5) t_7 = torch.matmul(t_5, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_5 = t_7.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_6 = t_5[0] t_10 = t_5[1] t_5 = t_5[2] t_5 = t_7.view(t_6, t_10, t_5) t_5 = self.l_10(t_5) t_5 = self.l_11(t_5) t_1 = (t_5 + t_1) t_1 = self.l_12(t_1) t_5 = self.l_13(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_14(t_5) t_5 = self.l_15(t_5) t_1 = (t_5 + t_1) t_1 = self.l_16(t_1) t_5 = self.l_17(t_1) t_10 = self.l_18(t_1) t_6 = self.l_19(t_1) t_7 = t_5.size() t_9 = t_10.size() t_8 = t_6.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_4 = t_7[0] t_0 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_4, t_0, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_3 = t_9[0] t_0 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_3, t_0, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_4 = t_8[0] t_0 = t_8[1] t_3 = t_8[2] t_8 = 
t_8[3] t_8 = t_6.view(t_4, t_0, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_7, t_9) t_7 = math.sqrt(64) t_7 = (t_9 / t_7) t_7 = (t_7 + t_2) t_7 = self.l_20(t_7) t_7 = self.l_21(t_7) t_8 = torch.matmul(t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_7 = t_8.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_9 = t_7[0] t_3 = t_7[1] t_7 = t_7[2] t_7 = t_8.view(t_9, t_3, t_7) t_7 = self.l_22(t_7) t_7 = self.l_23(t_7) t_1 = (t_7 + t_1) t_1 = self.l_24(t_1) t_7 = self.l_25(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_26(t_7) t_7 = self.l_27(t_7) t_1 = (t_7 + t_1) t_1 = self.l_28(t_1) t_7 = self.l_29(t_1) t_3 = self.l_30(t_1) t_9 = self.l_31(t_1) t_8 = t_7.size() t_0 = t_3.size() t_4 = t_9.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_6 = t_8[0] t_10 = t_8[1] t_5 = t_8[2] t_8 = t_8[3] t_8 = t_7.view(t_6, t_10, t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_10 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_3.view(t_5, t_10, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_6 = t_4[0] t_10 = t_4[1] t_5 = t_4[2] t_4 = t_4[3] t_4 = t_9.view(t_6, t_10, t_5, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_0 = t_0.transpose((- 1), (- 2)) t_0 = torch.matmul(t_8, t_0) t_8 = math.sqrt(64) t_8 = (t_0 / t_8) t_8 = (t_8 + t_2) t_8 = self.l_32(t_8) t_8 = self.l_33(t_8) t_4 = torch.matmul(t_8, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_8 = t_4.size() t_8 = t_8[slice(None, (- 2), None)] t_8 = (t_8 + (1024,)) t_0 = t_8[0] t_5 = t_8[1] t_8 = t_8[2] t_8 = t_4.view(t_0, t_5, t_8) t_8 = self.l_34(t_8) t_8 = self.l_35(t_8) t_1 = (t_8 + t_1) t_1 = self.l_36(t_1) t_8 = self.l_37(t_1) t_8 = torch.nn.functional.gelu(t_8) t_8 = self.l_38(t_8) t_8 = self.l_39(t_8) t_1 = (t_8 + t_1) t_1 = self.l_40(t_1) t_8 = self.l_41(t_1) t_5 = self.l_42(t_1) t_0 = self.l_43(t_1) t_4 
= t_8.size() t_10 = t_5.size() t_6 = t_0.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_9 = t_4[0] t_3 = t_4[1] t_7 = t_4[2] t_4 = t_4[3] t_4 = t_8.view(t_9, t_3, t_7, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_10 = t_10[slice(None, (- 1), None)] t_10 = (t_10 + (16, 64)) t_7 = t_10[0] t_3 = t_10[1] t_9 = t_10[2] t_10 = t_10[3] t_10 = t_5.view(t_7, t_3, t_9, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_9 = t_6[0] t_3 = t_6[1] t_7 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_9, t_3, t_7, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_10 = t_10.transpose((- 1), (- 2)) t_10 = torch.matmul(t_4, t_10) t_4 = math.sqrt(64) t_4 = (t_10 / t_4) t_4 = (t_4 + t_2) t_4 = self.l_44(t_4) t_4 = self.l_45(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_10 = t_4[0] t_7 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_10, t_7, t_4) t_4 = self.l_46(t_4) t_4 = self.l_47(t_4) t_1 = (t_4 + t_1) t_1 = self.l_48(t_1) t_4 = self.l_49(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_50(t_4) t_4 = self.l_51(t_4) t_1 = (t_4 + t_1) t_1 = self.l_52(t_1) t_4 = self.l_53(t_1) t_7 = self.l_54(t_1) t_10 = self.l_55(t_1) t_6 = t_4.size() t_3 = t_7.size() t_9 = t_10.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_5 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_0, t_5, t_8, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_8 = t_3[0] t_5 = t_3[1] t_0 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_8, t_5, t_0, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_0 = t_9[0] t_5 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_0, t_5, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_6, t_3) t_6 = math.sqrt(64) t_6 = (t_3 / t_6) t_6 = (t_6 + t_2) t_6 = self.l_56(t_6) t_6 = 
self.l_57(t_6) t_9 = torch.matmul(t_6, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_9 = t_9.contiguous() t_6 = t_9.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_3 = t_6[0] t_8 = t_6[1] t_6 = t_6[2] t_6 = t_9.view(t_3, t_8, t_6) t_6 = self.l_58(t_6) t_6 = self.l_59(t_6) t_1 = (t_6 + t_1) t_1 = self.l_60(t_1) t_6 = self.l_61(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_62(t_6) t_6 = self.l_63(t_6) t_1 = (t_6 + t_1) t_1 = self.l_64(t_1) t_6 = self.l_65(t_1) t_8 = self.l_66(t_1) t_3 = self.l_67(t_1) t_9 = t_6.size() t_5 = t_8.size() t_0 = t_3.size() t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_10 = t_9[0] t_7 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_6.view(t_10, t_7, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_4 = t_5[0] t_7 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_8.view(t_4, t_7, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_10 = t_0[0] t_7 = t_0[1] t_4 = t_0[2] t_0 = t_0[3] t_0 = t_3.view(t_10, t_7, t_4, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_9, t_5) t_9 = math.sqrt(64) t_9 = (t_5 / t_9) t_9 = (t_9 + t_2) t_9 = self.l_68(t_9) t_9 = self.l_69(t_9) t_0 = torch.matmul(t_9, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_9 = t_0.size() t_9 = t_9[slice(None, (- 2), None)] t_9 = (t_9 + (1024,)) t_5 = t_9[0] t_4 = t_9[1] t_9 = t_9[2] t_9 = t_0.view(t_5, t_4, t_9) t_9 = self.l_70(t_9) t_9 = self.l_71(t_9) t_1 = (t_9 + t_1) t_1 = self.l_72(t_1) t_9 = self.l_73(t_1) t_9 = torch.nn.functional.gelu(t_9) t_9 = self.l_74(t_9) t_9 = self.l_75(t_9) t_1 = (t_9 + t_1) t_1 = self.l_76(t_1) t_9 = self.l_77(t_1) t_4 = self.l_78(t_1) t_5 = self.l_79(t_1) t_0 = t_9.size() t_7 = t_4.size() t_10 = t_5.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_3 = t_0[0] t_8 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_9.view(t_3, t_8, t_6, t_0) t_0 = t_0.permute(0, 2, 
1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_6 = t_7[0] t_8 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_6, t_8, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_10 = t_10[slice(None, (- 1), None)] t_10 = (t_10 + (16, 64)) t_3 = t_10[0] t_8 = t_10[1] t_6 = t_10[2] t_10 = t_10[3] t_10 = t_5.view(t_3, t_8, t_6, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_7 = t_7.transpose((- 1), (- 2)) t_7 = torch.matmul(t_0, t_7) t_0 = math.sqrt(64) t_0 = (t_7 / t_0) t_0 = (t_0 + t_2) t_0 = self.l_80(t_0) t_0 = self.l_81(t_0) t_10 = torch.matmul(t_0, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_10 = t_10.contiguous() t_0 = t_10.size() t_0 = t_0[slice(None, (- 2), None)] t_0 = (t_0 + (1024,)) t_7 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_10.view(t_7, t_6, t_0) t_0 = self.l_82(t_0) t_0 = self.l_83(t_0) t_1 = (t_0 + t_1) t_1 = self.l_84(t_1) t_0 = self.l_85(t_1) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_86(t_0) t_0 = self.l_87(t_0) t_1 = (t_0 + t_1) t_1 = self.l_88(t_1) t_0 = self.l_89(t_1) t_6 = self.l_90(t_1) t_7 = self.l_91(t_1) t_10 = t_0.size() t_8 = t_6.size() t_3 = t_7.size() t_10 = t_10[slice(None, (- 1), None)] t_10 = (t_10 + (16, 64)) t_5 = t_10[0] t_4 = t_10[1] t_9 = t_10[2] t_10 = t_10[3] t_10 = t_0.view(t_5, t_4, t_9, t_10) t_10 = t_10.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_9 = t_8[0] t_4 = t_8[1] t_5 = t_8[2] t_8 = t_8[3] t_8 = t_6.view(t_9, t_4, t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_5 = t_3[0] t_4 = t_3[1] t_9 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_5, t_4, t_9, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_10, t_8) t_10 = math.sqrt(64) t_10 = (t_8 / t_10) t_10 = (t_10 + t_2) t_10 = self.l_92(t_10) t_10 = self.l_93(t_10) t_3 = torch.matmul(t_10, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_10 = t_3.size() t_10 = t_10[slice(None, (- 2), None)] t_10 = (t_10 + (1024,)) t_8 = t_10[0] t_9 = 
t_10[1] t_10 = t_10[2] t_10 = t_3.view(t_8, t_9, t_10) t_10 = self.l_94(t_10) t_10 = self.l_95(t_10) t_1 = (t_10 + t_1) t_1 = self.l_96(t_1) t_10 = self.l_97(t_1) t_10 = torch.nn.functional.gelu(t_10) t_10 = self.l_98(t_10) t_10 = self.l_99(t_10) t_1 = (t_10 + t_1) t_1 = self.l_100(t_1) t_10 = self.l_101(t_1) t_9 = self.l_102(t_1) t_8 = self.l_103(t_1) t_3 = t_10.size() t_4 = t_9.size() t_5 = t_8.size() t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_7 = t_3[0] t_6 = t_3[1] t_0 = t_3[2] t_3 = t_3[3] t_3 = t_10.view(t_7, t_6, t_0, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_0 = t_4[0] t_6 = t_4[1] t_7 = t_4[2] t_4 = t_4[3] t_4 = t_9.view(t_0, t_6, t_7, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_7 = t_5[0] t_6 = t_5[1] t_0 = t_5[2] t_5 = t_5[3] t_5 = t_8.view(t_7, t_6, t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_4 = t_4.transpose((- 1), (- 2)) t_4 = torch.matmul(t_3, t_4) t_3 = math.sqrt(64) t_3 = (t_4 / t_3) t_3 = (t_3 + t_2) t_3 = self.l_104(t_3) t_3 = self.l_105(t_3) t_5 = torch.matmul(t_3, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_3 = t_5.size() t_3 = t_3[slice(None, (- 2), None)] t_3 = (t_3 + (1024,)) t_4 = t_3[0] t_0 = t_3[1] t_3 = t_3[2] t_3 = t_5.view(t_4, t_0, t_3) t_3 = self.l_106(t_3) t_3 = self.l_107(t_3) t_1 = (t_3 + t_1) t_1 = self.l_108(t_1) t_3 = self.l_109(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_110(t_3) t_3 = self.l_111(t_3) t_1 = (t_3 + t_1) t_1 = self.l_112(t_1) t_3 = self.l_113(t_1) t_0 = self.l_114(t_1) t_4 = self.l_115(t_1) t_5 = t_3.size() t_6 = t_0.size() t_7 = t_4.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_9 = t_5[1] t_10 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_8, t_9, t_10, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_10 = t_6[0] t_9 = t_6[1] t_8 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_10, t_9, t_8, t_6) t_6 = 
t_6.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_9 = t_7[1] t_10 = t_7[2] t_7 = t_7[3] t_7 = t_4.view(t_8, t_9, t_10, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_5, t_6) t_5 = math.sqrt(64) t_5 = (t_6 / t_5) t_5 = (t_5 + t_2) t_5 = self.l_116(t_5) t_5 = self.l_117(t_5) t_7 = torch.matmul(t_5, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_5 = t_7.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_6 = t_5[0] t_10 = t_5[1] t_5 = t_5[2] t_5 = t_7.view(t_6, t_10, t_5) t_5 = self.l_118(t_5) t_5 = self.l_119(t_5) t_1 = (t_5 + t_1) t_1 = self.l_120(t_1) t_5 = self.l_121(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_122(t_5) t_5 = self.l_123(t_5) t_1 = (t_5 + t_1) t_1 = self.l_124(t_1) t_5 = self.l_125(t_1) t_10 = self.l_126(t_1) t_6 = self.l_127(t_1) t_7 = t_5.size() t_9 = t_10.size() t_8 = t_6.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_4 = t_7[0] t_0 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_4, t_0, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_3 = t_9[0] t_0 = t_9[1] t_4 = t_9[2] t_9 = t_9[3] t_9 = t_10.view(t_3, t_0, t_4, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_4 = t_8[0] t_0 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_6.view(t_4, t_0, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_7, t_9) t_7 = math.sqrt(64) t_7 = (t_9 / t_7) t_7 = (t_7 + t_2) t_7 = self.l_128(t_7) t_7 = self.l_129(t_7) t_8 = torch.matmul(t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_7 = t_8.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_9 = t_7[0] t_3 = t_7[1] t_7 = t_7[2] t_7 = t_8.view(t_9, t_3, t_7) t_7 = self.l_130(t_7) t_7 = self.l_131(t_7) t_1 = (t_7 + t_1) t_1 = self.l_132(t_1) t_7 = self.l_133(t_1) t_7 = torch.nn.functional.gelu(t_7) 
t_7 = self.l_134(t_7) t_7 = self.l_135(t_7) t_1 = (t_7 + t_1) t_1 = self.l_136(t_1) t_7 = self.l_137(t_1) t_3 = self.l_138(t_1) t_9 = self.l_139(t_1) t_8 = t_7.size() t_0 = t_3.size() t_4 = t_9.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_6 = t_8[0] t_10 = t_8[1] t_5 = t_8[2] t_8 = t_8[3] t_8 = t_7.view(t_6, t_10, t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_10 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_3.view(t_5, t_10, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_6 = t_4[0] t_10 = t_4[1] t_5 = t_4[2] t_4 = t_4[3] t_4 = t_9.view(t_6, t_10, t_5, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_0 = t_0.transpose((- 1), (- 2)) t_0 = torch.matmul(t_8, t_0) t_8 = math.sqrt(64) t_8 = (t_0 / t_8) t_8 = (t_8 + t_2) t_8 = self.l_140(t_8) t_8 = self.l_141(t_8) t_4 = torch.matmul(t_8, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_8 = t_4.size() t_8 = t_8[slice(None, (- 2), None)] t_8 = (t_8 + (1024,)) t_0 = t_8[0] t_5 = t_8[1] t_8 = t_8[2] t_8 = t_4.view(t_0, t_5, t_8) t_8 = self.l_142(t_8) t_8 = self.l_143(t_8) t_1 = (t_8 + t_1) t_1 = self.l_144(t_1) t_8 = self.l_145(t_1) t_8 = torch.nn.functional.gelu(t_8) return list(flatten((t_2, t_1, t_8))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]', 'BertForQuestionAnswering/Linear[qa_outputs]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.encoder.11.output.dense', 'l_1': 'bert.encoder.11.output.dropout', 'l_2': 'bert.encoder.11.output.LayerNorm', 'l_3': 'bert.encoder.12.attention.self.query', 'l_4': 'bert.encoder.12.attention.self.key', 'l_5': 'bert.encoder.12.attention.self.value', 'l_6': 'bert.encoder.12.attention.self.softmax', 'l_7': 'bert.encoder.12.attention.self.dropout', 'l_8': 'bert.encoder.12.attention.output.dense', 'l_9': 'bert.encoder.12.attention.output.dropout', 'l_10': 'bert.encoder.12.attention.output.LayerNorm', 'l_11': 
'bert.encoder.12.intermediate.dense', 'l_12': 'bert.encoder.12.output.dense', 'l_13': 'bert.encoder.12.output.dropout', 'l_14': 'bert.encoder.12.output.LayerNorm', 'l_15': 'bert.encoder.13.attention.self.query', 'l_16': 'bert.encoder.13.attention.self.key', 'l_17': 'bert.encoder.13.attention.self.value', 'l_18': 'bert.encoder.13.attention.self.softmax', 'l_19': 'bert.encoder.13.attention.self.dropout', 'l_20': 'bert.encoder.13.attention.output.dense', 'l_21': 'bert.encoder.13.attention.output.dropout', 'l_22': 'bert.encoder.13.attention.output.LayerNorm', 'l_23': 'bert.encoder.13.intermediate.dense', 'l_24': 'bert.encoder.13.output.dense', 'l_25': 'bert.encoder.13.output.dropout', 'l_26': 'bert.encoder.13.output.LayerNorm', 'l_27': 'bert.encoder.14.attention.self.query', 'l_28': 'bert.encoder.14.attention.self.key', 'l_29': 'bert.encoder.14.attention.self.value', 'l_30': 'bert.encoder.14.attention.self.softmax', 'l_31': 'bert.encoder.14.attention.self.dropout', 'l_32': 'bert.encoder.14.attention.output.dense', 'l_33': 'bert.encoder.14.attention.output.dropout', 'l_34': 'bert.encoder.14.attention.output.LayerNorm', 'l_35': 'bert.encoder.14.intermediate.dense', 'l_36': 'bert.encoder.14.output.dense', 'l_37': 'bert.encoder.14.output.dropout', 'l_38': 'bert.encoder.14.output.LayerNorm', 'l_39': 'bert.encoder.15.attention.self.query', 'l_40': 'bert.encoder.15.attention.self.key', 'l_41': 'bert.encoder.15.attention.self.value', 'l_42': 'bert.encoder.15.attention.self.softmax', 'l_43': 'bert.encoder.15.attention.self.dropout', 'l_44': 'bert.encoder.15.attention.output.dense', 'l_45': 'bert.encoder.15.attention.output.dropout', 'l_46': 'bert.encoder.15.attention.output.LayerNorm', 'l_47': 'bert.encoder.15.intermediate.dense', 'l_48': 'bert.encoder.15.output.dense', 'l_49': 'bert.encoder.15.output.dropout', 'l_50': 'bert.encoder.15.output.LayerNorm', 'l_51': 'bert.encoder.16.attention.self.query', 'l_52': 'bert.encoder.16.attention.self.key', 'l_53': 
'bert.encoder.16.attention.self.value', 'l_54': 'bert.encoder.16.attention.self.softmax', 'l_55': 'bert.encoder.16.attention.self.dropout', 'l_56': 'bert.encoder.16.attention.output.dense', 'l_57': 'bert.encoder.16.attention.output.dropout', 'l_58': 'bert.encoder.16.attention.output.LayerNorm', 'l_59': 'bert.encoder.16.intermediate.dense', 'l_60': 'bert.encoder.16.output.dense', 'l_61': 'bert.encoder.16.output.dropout', 'l_62': 'bert.encoder.16.output.LayerNorm', 'l_63': 'bert.encoder.17.attention.self.query', 'l_64': 'bert.encoder.17.attention.self.key', 'l_65': 'bert.encoder.17.attention.self.value', 'l_66': 'bert.encoder.17.attention.self.softmax', 'l_67': 'bert.encoder.17.attention.self.dropout', 'l_68': 'bert.encoder.17.attention.output.dense', 'l_69': 'bert.encoder.17.attention.output.dropout', 'l_70': 'bert.encoder.17.attention.output.LayerNorm', 'l_71': 'bert.encoder.17.intermediate.dense', 'l_72': 'bert.encoder.17.output.dense', 'l_73': 'bert.encoder.17.output.dropout', 'l_74': 'bert.encoder.17.output.LayerNorm', 'l_75': 'bert.encoder.18.attention.self.query', 'l_76': 'bert.encoder.18.attention.self.key', 'l_77': 'bert.encoder.18.attention.self.value', 'l_78': 'bert.encoder.18.attention.self.softmax', 'l_79': 'bert.encoder.18.attention.self.dropout', 'l_80': 'bert.encoder.18.attention.output.dense', 'l_81': 'bert.encoder.18.attention.output.dropout', 'l_82': 'bert.encoder.18.attention.output.LayerNorm', 'l_83': 'bert.encoder.18.intermediate.dense', 'l_84': 'bert.encoder.18.output.dense', 'l_85': 'bert.encoder.18.output.dropout', 'l_86': 'bert.encoder.18.output.LayerNorm', 'l_87': 'bert.encoder.19.attention.self.query', 'l_88': 'bert.encoder.19.attention.self.key', 'l_89': 'bert.encoder.19.attention.self.value', 'l_90': 'bert.encoder.19.attention.self.softmax', 'l_91': 'bert.encoder.19.attention.self.dropout', 'l_92': 'bert.encoder.19.attention.output.dense', 'l_93': 'bert.encoder.19.attention.output.dropout', 'l_94': 
'bert.encoder.19.attention.output.LayerNorm', 'l_95': 'bert.encoder.19.intermediate.dense', 'l_96': 'bert.encoder.19.output.dense', 'l_97': 'bert.encoder.19.output.dropout', 'l_98': 'bert.encoder.19.output.LayerNorm', 'l_99': 'bert.encoder.20.attention.self.query', 'l_100': 'bert.encoder.20.attention.self.key', 'l_101': 'bert.encoder.20.attention.self.value', 'l_102': 'bert.encoder.20.attention.self.softmax', 'l_103': 'bert.encoder.20.attention.self.dropout', 'l_104': 'bert.encoder.20.attention.output.dense', 'l_105': 'bert.encoder.20.attention.output.dropout', 'l_106': 'bert.encoder.20.attention.output.LayerNorm', 'l_107': 'bert.encoder.20.intermediate.dense', 'l_108': 'bert.encoder.20.output.dense', 'l_109': 'bert.encoder.20.output.dropout', 'l_110': 'bert.encoder.20.output.LayerNorm', 'l_111': 'bert.encoder.21.attention.self.query', 'l_112': 'bert.encoder.21.attention.self.key', 'l_113': 'bert.encoder.21.attention.self.value', 'l_114': 'bert.encoder.21.attention.self.softmax', 'l_115': 'bert.encoder.21.attention.self.dropout', 'l_116': 'bert.encoder.21.attention.output.dense', 'l_117': 'bert.encoder.21.attention.output.dropout', 'l_118': 'bert.encoder.21.attention.output.LayerNorm', 'l_119': 'bert.encoder.21.intermediate.dense', 'l_120': 'bert.encoder.21.output.dense', 'l_121': 'bert.encoder.21.output.dropout', 'l_122': 'bert.encoder.21.output.LayerNorm', 'l_123': 'bert.encoder.22.attention.self.query', 'l_124': 'bert.encoder.22.attention.self.key', 'l_125': 'bert.encoder.22.attention.self.value', 'l_126': 'bert.encoder.22.attention.self.softmax', 'l_127': 'bert.encoder.22.attention.self.dropout', 'l_128': 'bert.encoder.22.attention.output.dense', 'l_129': 'bert.encoder.22.attention.output.dropout', 'l_130': 'bert.encoder.22.attention.output.LayerNorm', 'l_131': 'bert.encoder.22.intermediate.dense', 'l_132': 'bert.encoder.22.output.dense', 'l_133': 'bert.encoder.22.output.dropout', 'l_134': 'bert.encoder.22.output.LayerNorm', 'l_135': 
'bert.encoder.23.attention.self.query', 'l_136': 'bert.encoder.23.attention.self.key', 'l_137': 'bert.encoder.23.attention.self.value', 'l_138': 'bert.encoder.23.attention.self.softmax', 'l_139': 'bert.encoder.23.attention.self.dropout', 'l_140': 'bert.encoder.23.attention.output.dense', 'l_141': 'bert.encoder.23.attention.output.dropout', 'l_142': 'bert.encoder.23.attention.output.LayerNorm', 'l_143': 'bert.encoder.23.intermediate.dense', 'l_144': 'bert.encoder.23.output.dense', 'l_145': 'bert.encoder.23.output.dropout', 'l_146': 'bert.encoder.23.output.LayerNorm', 'l_147': 'bert.pooler.dense', 'l_148': 'bert.pooler.activation', 'l_149': 'qa_outputs'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x2) t_0 = self.l_1(t_0) t_0 = (t_0 + x1) t_0 = self.l_2(t_0) t_1 = self.l_3(t_0) t_2 = self.l_4(t_0) t_3 = self.l_5(t_0) t_4 = t_1.size() t_5 = t_2.size() t_6 = t_3.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_1.view(t_7, t_8, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_3.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + x0) t_4 = self.l_6(t_4) t_4 = self.l_7(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_0 = (t_4 + t_0) t_0 = self.l_10(t_0) t_4 = self.l_11(t_0) t_4 = torch.nn.functional.gelu(t_4) t_4 
= self.l_12(t_4) t_4 = self.l_13(t_4) t_0 = (t_4 + t_0) t_0 = self.l_14(t_0) t_4 = self.l_15(t_0) t_9 = self.l_16(t_0) t_5 = self.l_17(t_0) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_3 = t_6[0] t_2 = t_6[1] t_1 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_3, t_2, t_1, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_1 = t_8[0] t_2 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_1, t_2, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_3 = t_7[0] t_2 = t_7[1] t_1 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_3, t_2, t_1, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / t_6) t_6 = (t_6 + x0) t_6 = self.l_18(t_6) t_6 = self.l_19(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_1, t_6) t_6 = self.l_20(t_6) t_6 = self.l_21(t_6) t_0 = (t_6 + t_0) t_0 = self.l_22(t_0) t_6 = self.l_23(t_0) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_24(t_6) t_6 = self.l_25(t_6) t_0 = (t_6 + t_0) t_0 = self.l_26(t_0) t_6 = self.l_27(t_0) t_1 = self.l_28(t_0) t_8 = self.l_29(t_0) t_7 = t_6.size() t_2 = t_1.size() t_3 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_4 = t_2[0] t_9 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_1.view(t_4, t_9, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_5 = t_3[0] t_9 = t_3[1] t_4 = t_3[2] t_3 = t_3[3] t_3 = t_8.view(t_5, t_9, t_4, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) 
t_2 = torch.matmul(t_7, t_2) t_7 = math.sqrt(64) t_7 = (t_2 / t_7) t_7 = (t_7 + x0) t_7 = self.l_30(t_7) t_7 = self.l_31(t_7) t_3 = torch.matmul(t_7, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_7 = t_3.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_2 = t_7[0] t_4 = t_7[1] t_7 = t_7[2] t_7 = t_3.view(t_2, t_4, t_7) t_7 = self.l_32(t_7) t_7 = self.l_33(t_7) t_0 = (t_7 + t_0) t_0 = self.l_34(t_0) t_7 = self.l_35(t_0) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_36(t_7) t_7 = self.l_37(t_7) t_0 = (t_7 + t_0) t_0 = self.l_38(t_0) t_7 = self.l_39(t_0) t_4 = self.l_40(t_0) t_2 = self.l_41(t_0) t_3 = t_7.size() t_9 = t_4.size() t_5 = t_2.size() t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_8 = t_3[0] t_1 = t_3[1] t_6 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_8, t_1, t_6, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_6 = t_9[0] t_1 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_6, t_1, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_1 = t_5[1] t_6 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_8, t_1, t_6, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_3, t_9) t_3 = math.sqrt(64) t_3 = (t_9 / t_3) t_3 = (t_3 + x0) t_3 = self.l_42(t_3) t_3 = self.l_43(t_3) t_5 = torch.matmul(t_3, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_3 = t_5.size() t_3 = t_3[slice(None, (- 2), None)] t_3 = (t_3 + (1024,)) t_9 = t_3[0] t_6 = t_3[1] t_3 = t_3[2] t_3 = t_5.view(t_9, t_6, t_3) t_3 = self.l_44(t_3) t_3 = self.l_45(t_3) t_0 = (t_3 + t_0) t_0 = self.l_46(t_0) t_3 = self.l_47(t_0) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_48(t_3) t_3 = self.l_49(t_3) t_0 = (t_3 + t_0) t_0 = self.l_50(t_0) t_3 = self.l_51(t_0) t_6 = self.l_52(t_0) t_9 = self.l_53(t_0) t_5 = t_3.size() t_1 = t_6.size() t_8 = t_9.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_2 = 
t_5[0] t_4 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_2, t_4, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_1 = t_1[slice(None, (- 1), None)] t_1 = (t_1 + (16, 64)) t_7 = t_1[0] t_4 = t_1[1] t_2 = t_1[2] t_1 = t_1[3] t_1 = t_6.view(t_7, t_4, t_2, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_2 = t_8[0] t_4 = t_8[1] t_7 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_2, t_4, t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_1 = t_1.transpose((- 1), (- 2)) t_1 = torch.matmul(t_5, t_1) t_5 = math.sqrt(64) t_5 = (t_1 / t_5) t_5 = (t_5 + x0) t_5 = self.l_54(t_5) t_5 = self.l_55(t_5) t_8 = torch.matmul(t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_5 = t_8.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_1 = t_5[0] t_7 = t_5[1] t_5 = t_5[2] t_5 = t_8.view(t_1, t_7, t_5) t_5 = self.l_56(t_5) t_5 = self.l_57(t_5) t_0 = (t_5 + t_0) t_0 = self.l_58(t_0) t_5 = self.l_59(t_0) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_60(t_5) t_5 = self.l_61(t_5) t_0 = (t_5 + t_0) t_0 = self.l_62(t_0) t_5 = self.l_63(t_0) t_7 = self.l_64(t_0) t_1 = self.l_65(t_0) t_8 = t_5.size() t_4 = t_7.size() t_2 = t_1.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_9 = t_8[0] t_6 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_5.view(t_9, t_6, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_3 = t_4[0] t_6 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_7.view(t_3, t_6, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_9 = t_2[0] t_6 = t_2[1] t_3 = t_2[2] t_2 = t_2[3] t_2 = t_1.view(t_9, t_6, t_3, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_4 = t_4.transpose((- 1), (- 2)) t_4 = torch.matmul(t_8, t_4) t_8 = math.sqrt(64) t_8 = (t_4 / t_8) t_8 = (t_8 + x0) t_8 = self.l_66(t_8) t_8 = self.l_67(t_8) t_2 = torch.matmul(t_8, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_2 = t_2.contiguous() t_8 = t_2.size() t_8 = t_8[slice(None, (- 2), 
None)] t_8 = (t_8 + (1024,)) t_4 = t_8[0] t_3 = t_8[1] t_8 = t_8[2] t_8 = t_2.view(t_4, t_3, t_8) t_8 = self.l_68(t_8) t_8 = self.l_69(t_8) t_0 = (t_8 + t_0) t_0 = self.l_70(t_0) t_8 = self.l_71(t_0) t_8 = torch.nn.functional.gelu(t_8) t_8 = self.l_72(t_8) t_8 = self.l_73(t_8) t_0 = (t_8 + t_0) t_0 = self.l_74(t_0) t_8 = self.l_75(t_0) t_3 = self.l_76(t_0) t_4 = self.l_77(t_0) t_2 = t_8.size() t_6 = t_3.size() t_9 = t_4.size() t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_1 = t_2[0] t_7 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_8.view(t_1, t_7, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_5 = t_6[0] t_7 = t_6[1] t_1 = t_6[2] t_6 = t_6[3] t_6 = t_3.view(t_5, t_7, t_1, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_1 = t_9[0] t_7 = t_9[1] t_5 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_1, t_7, t_5, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_6 = t_6.transpose((- 1), (- 2)) t_6 = torch.matmul(t_2, t_6) t_2 = math.sqrt(64) t_2 = (t_6 / t_2) t_2 = (t_2 + x0) t_2 = self.l_78(t_2) t_2 = self.l_79(t_2) t_9 = torch.matmul(t_2, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_9 = t_9.contiguous() t_2 = t_9.size() t_2 = t_2[slice(None, (- 2), None)] t_2 = (t_2 + (1024,)) t_6 = t_2[0] t_5 = t_2[1] t_2 = t_2[2] t_2 = t_9.view(t_6, t_5, t_2) t_2 = self.l_80(t_2) t_2 = self.l_81(t_2) t_0 = (t_2 + t_0) t_0 = self.l_82(t_0) t_2 = self.l_83(t_0) t_2 = torch.nn.functional.gelu(t_2) t_2 = self.l_84(t_2) t_2 = self.l_85(t_2) t_0 = (t_2 + t_0) t_0 = self.l_86(t_0) t_2 = self.l_87(t_0) t_5 = self.l_88(t_0) t_6 = self.l_89(t_0) t_9 = t_2.size() t_7 = t_5.size() t_1 = t_6.size() t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_4 = t_9[0] t_3 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_2.view(t_4, t_3, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_8 = t_7[0] t_3 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_8, t_3, t_4, t_7) 
t_7 = t_7.permute(0, 2, 1, 3) t_1 = t_1[slice(None, (- 1), None)] t_1 = (t_1 + (16, 64)) t_4 = t_1[0] t_3 = t_1[1] t_8 = t_1[2] t_1 = t_1[3] t_1 = t_6.view(t_4, t_3, t_8, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_7 = t_7.transpose((- 1), (- 2)) t_7 = torch.matmul(t_9, t_7) t_9 = math.sqrt(64) t_9 = (t_7 / t_9) t_9 = (t_9 + x0) t_9 = self.l_90(t_9) t_9 = self.l_91(t_9) t_1 = torch.matmul(t_9, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_1 = t_1.contiguous() t_9 = t_1.size() t_9 = t_9[slice(None, (- 2), None)] t_9 = (t_9 + (1024,)) t_7 = t_9[0] t_8 = t_9[1] t_9 = t_9[2] t_9 = t_1.view(t_7, t_8, t_9) t_9 = self.l_92(t_9) t_9 = self.l_93(t_9) t_0 = (t_9 + t_0) t_0 = self.l_94(t_0) t_9 = self.l_95(t_0) t_9 = torch.nn.functional.gelu(t_9) t_9 = self.l_96(t_9) t_9 = self.l_97(t_9) t_0 = (t_9 + t_0) t_0 = self.l_98(t_0) t_9 = self.l_99(t_0) t_8 = self.l_100(t_0) t_7 = self.l_101(t_0) t_1 = t_9.size() t_3 = t_8.size() t_4 = t_7.size() t_1 = t_1[slice(None, (- 1), None)] t_1 = (t_1 + (16, 64)) t_6 = t_1[0] t_5 = t_1[1] t_2 = t_1[2] t_1 = t_1[3] t_1 = t_9.view(t_6, t_5, t_2, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_2 = t_3[0] t_5 = t_3[1] t_6 = t_3[2] t_3 = t_3[3] t_3 = t_8.view(t_2, t_5, t_6, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_6 = t_4[0] t_5 = t_4[1] t_2 = t_4[2] t_4 = t_4[3] t_4 = t_7.view(t_6, t_5, t_2, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_1, t_3) t_1 = math.sqrt(64) t_1 = (t_3 / t_1) t_1 = (t_1 + x0) t_1 = self.l_102(t_1) t_1 = self.l_103(t_1) t_4 = torch.matmul(t_1, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_1 = t_4.size() t_1 = t_1[slice(None, (- 2), None)] t_1 = (t_1 + (1024,)) t_3 = t_1[0] t_2 = t_1[1] t_1 = t_1[2] t_1 = t_4.view(t_3, t_2, t_1) t_1 = self.l_104(t_1) t_1 = self.l_105(t_1) t_0 = (t_1 + t_0) t_0 = self.l_106(t_0) t_1 = self.l_107(t_0) t_1 = torch.nn.functional.gelu(t_1) t_1 = 
self.l_108(t_1) t_1 = self.l_109(t_1) t_0 = (t_1 + t_0) t_0 = self.l_110(t_0) t_1 = self.l_111(t_0) t_2 = self.l_112(t_0) t_3 = self.l_113(t_0) t_4 = t_1.size() t_5 = t_2.size() t_6 = t_3.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_1.view(t_7, t_8, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_3.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + x0) t_4 = self.l_114(t_4) t_4 = self.l_115(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_116(t_4) t_4 = self.l_117(t_4) t_0 = (t_4 + t_0) t_0 = self.l_118(t_0) t_4 = self.l_119(t_0) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_120(t_4) t_4 = self.l_121(t_4) t_0 = (t_4 + t_0) t_0 = self.l_122(t_0) t_4 = self.l_123(t_0) t_9 = self.l_124(t_0) t_5 = self.l_125(t_0) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_3 = t_6[0] t_2 = t_6[1] t_1 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_3, t_2, t_1, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_1 = t_8[0] t_2 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_1, t_2, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_3 = t_7[0] t_2 = t_7[1] t_1 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_3, t_2, t_1, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = 
t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / t_6) t_6 = (t_6 + x0) t_6 = self.l_126(t_6) t_6 = self.l_127(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_1 = t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_1, t_6) t_6 = self.l_128(t_6) t_6 = self.l_129(t_6) t_0 = (t_6 + t_0) t_0 = self.l_130(t_0) t_6 = self.l_131(t_0) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_132(t_6) t_6 = self.l_133(t_6) t_0 = (t_6 + t_0) t_0 = self.l_134(t_0) t_6 = self.l_135(t_0) t_1 = self.l_136(t_0) t_8 = self.l_137(t_0) t_7 = t_6.size() t_2 = t_1.size() t_3 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_4 = t_2[0] t_9 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_1.view(t_4, t_9, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_5 = t_3[0] t_9 = t_3[1] t_4 = t_3[2] t_3 = t_3[3] t_3 = t_8.view(t_5, t_9, t_4, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) t_2 = torch.matmul(t_7, t_2) t_7 = math.sqrt(64) t_7 = (t_2 / t_7) t_7 = (t_7 + x0) t_7 = self.l_138(t_7) t_7 = self.l_139(t_7) t_3 = torch.matmul(t_7, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_7 = t_3.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_2 = t_7[0] t_4 = t_7[1] t_7 = t_7[2] t_7 = t_3.view(t_2, t_4, t_7) t_7 = self.l_140(t_7) t_7 = self.l_141(t_7) t_0 = (t_7 + t_0) t_0 = self.l_142(t_0) t_7 = self.l_143(t_0) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_144(t_7) t_7 = self.l_145(t_7) t_0 = (t_7 + t_0) t_0 = self.l_146(t_0) t_7 = self.l_149(t_0) t_0 = t_0[(slice(None, None, None), 0)] t_0 = self.l_147(t_0) t_0 = self.l_148(t_0) return (t_7,) def 
state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[nn.Module, str, nn.Module, Optional[bool]]]:
    """Walk a model's module tree, yielding (layer, scope, parent) tuples.

    A child is yielded as a terminal layer when it has no children of its
    own, is an instance of one of ``basic_blocks``, or ``depth`` has been
    exhausted; otherwise recursion continues into it with ``depth - 1``.

    Parameters
    ----------
    module:
        the model to iterate over
    depth:
        how far down in the module tree to descend
    prefix:
        scope-name prefix; defaults to the module's class name
    basic_blocks:
        module types that are never broken down further
    full:
        when True, every visited module is yielded as a 4-tuple whose last
        element flags whether it is terminal; when False only terminal
        layers are yielded, as 3-tuples
    """
    if prefix is None:
        prefix = type(module).__name__
    for child_name, child in module.named_children():
        scope = f"{prefix}/{type(child).__name__}[{child_name}]"
        terminal = (not list(child.children())
                    or isinstance(child, tuple(basic_blocks))
                    or depth == 0)
        if terminal:
            if full:
                yield child, scope, module, True
            else:
                yield child, scope, module
        else:
            if full:
                yield child, scope, module, False
            yield from traverse_model(child, depth - 1, scope, basic_blocks, full)
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[str, nn.Module]:
    """Map layer scope names to the layer modules found by ``traverse_model``."""
    mapping = {}
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[torch.tensor, str]]:
    """Iterate over a model's parameters and buffers, yielding (obj, obj_scope).

    Parameters of a module are yielded before its buffers, and a module's own
    tensors are yielded before descending into its children.

    Parameters
    ----------
    module:
        the model to iterate over
    prefix:
        scope-name prefix; defaults to the module's class name
    """
    scope = type(module).__name__ if prefix is None else prefix
    for param_name, param in module.named_parameters(recurse=False):
        yield param, f"{scope}/{type(param).__name__}[{param_name}]"
    for buffer_name, buffer in module.named_buffers(recurse=False):
        yield buffer, f"{scope}/{type(buffer).__name__}[{buffer_name}]"
    for child_name, child in module.named_children():
        child_scope = f"{scope}/{type(child).__name__}[{child_name}]"
        yield from traverse_params_buffs(child, child_scope)
def tensorDict(model: nn.Module) -> OrderedDict[str, Tensor]:
    """Map parameter/buffer scope names to tensors, preserving traversal order."""
    result = collections.OrderedDict()
    for tensor, scope in traverse_params_buffs(model):
        result[scope] = tensor
    return result
def move_tensors(ts, device):
    """Recursively move every tensor/module inside a nested structure to ``device``.

    Non-tensor leaves are returned unchanged; container shapes are preserved
    by ``nested_map``.
    """
    def _relocate(obj):
        if isinstance(obj, (nn.Module, Tensor)):
            return obj.to(device)
        return obj
    return nested_map(_relocate, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts):
    """Yield the leaves of a nested structure in deterministic order.

    Lists/tuples/sets are flattened in iteration order; dict values are
    flattened in sorted-key order (matching ``_unflatten``). ``torch.Size``
    is yielded whole as a single leaf.
    """
    if isinstance(ts, torch.Size):
        # atomic leaf despite being a tuple subclass
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=lambda kv: kv[0]):
            yield from flatten(value)
    else:
        yield ts
def unflatten(xs, structure):
    """Rebuild a nested structure from the flat sequence ``xs`` (inverse of ``flatten``)."""
    value, _consumed = _unflatten(xs, structure)
    return value
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs):
    """Return the partition's state dict with keys translated to original model names.

    ``partition.lookup`` maps internal names (``l_i`` / ``p_i`` / ``b_i``) to the
    original model's scope names; keys with a dotted suffix (e.g. ``l_0.weight``)
    have only their head translated and the suffix reattached.
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
        else:
            assert '.' in key
            dot = key.find('.')
            translated[lookup[key[:dot]] + key[dot:]] = value
    return translated
def load_state_dict(partition, state_dict, strict=True):
    """Load a state dict keyed by original model names into the partition.

    Inverts ``partition.lookup`` to translate original names back to internal
    ones, moving each tensor to ``partition.device`` on the way. Keys whose
    head (text before the final dot) matches a lookup entry are translated
    head-only; other keys are silently skipped and left to ``strict`` to flag.
    """
    # NOTE(review): partition.state_dict(None) passes None positionally as the
    # destination — presumably fine for the torch version targeted; confirm.
    reverse_lookup = {original: internal for internal, original in partition.lookup.items()}
    device = partition.device
    translated = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in reverse_lookup:
            translated[reverse_lookup[key]] = state_dict[key].to(device)
            continue
        dot = key.rfind('.')
        head, tail = key[:dot], key[dot:]
        if head in reverse_lookup:
            translated[reverse_lookup[head] + tail] = state_dict[key].to(device)
    nn.Module.load_state_dict(partition, translated, strict=strict)
def named_buffers(partition, prefix='', recurse=True):
    """Yield (name, buffer) pairs with names translated via ``partition.lookup``.

    Keys found directly in the lookup are replaced outright; dotted keys have
    their head translated and the dotted suffix kept.
    """
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield lookup[key], buf
        else:
            assert '.' in key
            dot = key.find('.')
            yield lookup[key[:dot]] + key[dot:], buf
def named_parameters(partition, prefix='', recurse=True):
    """Yield (name, parameter) pairs with names translated via ``partition.lookup``.

    Mirrors ``named_buffers``: direct lookup hits are replaced whole, dotted
    keys are translated head-only with the suffix reattached.
    """
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield lookup[key], param
        else:
            assert '.' in key
            dot = key.find('.')
            yield lookup[key[:dot]] + key[dot:], param
def cpu(partition):
    """Move the partition to the CPU, keeping ``partition.device`` in sync."""
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)
def cuda(partition, device=None):
    """Move the partition to a CUDA device, keeping ``partition.device`` in sync.

    Defaults to the current CUDA device when ``device`` is not given.
    """
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    """Forward to ``nn.Module.to`` while tracking any target device on ``partition.device``.

    The device may arrive as a ``device``/``tensor`` keyword or as the first
    positional argument (a device, index, string, or tensor); a positional
    argument overrides the keyword forms.
    """
    # NOTE(review): a 'tensor' kwarg is still forwarded to nn.Module.to below,
    # which does not take that keyword — presumably callers pass tensors
    # positionally; confirm before relying on the kwarg path.
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            device = first
        if torch.is_tensor(first):
            device = first.device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
def bert_large_uncased_whole_word_maskings_384_2p_bw12_pipedream():
    """Return the run configuration for the bert-large whole-word-masking 2-partition pipeline."""
    return {
        'model_type': 'bert',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {
            'precompute_attention_mask': False,
            'return_dict': False,
        },
        'do_resize_token_embedding': False,
    }
# Generated pipeline-parallel configuration for a 4-stage split of
# bert-large-uncased-whole-word-masking (question-answering head, sequence
# length 384).  The shapes below were traced at batch size 12; the loops after
# the literal rewrite the batch dimension of every 'is_batched' entry to the
# requested *batch_size*.  A 'created_by'/'used_by' value of -1 denotes a
# model-level input/output rather than a stage index.
def create_pipeline_configuration(DEBUG=False, batch_size=12): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Tanh, LayerNorm, Dropout, Linear, Softmax, Embedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3]}, 'input_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([12, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 3}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 3}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs':
# Stage 1 outputs below include two intermediate attention tensors of encoder
# layer 11 that cross the stage boundary in addition to the layer-10
# activation.  NOTE(review): names/shapes come from the tracer; verify against
# the partition classes before editing by hand.
{'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1166': {'shape': torch.Size([12, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/torch::matmul_1170': {'shape': torch.Size([12, 16, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 2}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1166': {'shape': torch.Size([12, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/torch::matmul_1170': {'shape': torch.Size([12, 16, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]),
# (continued) remainder of stage 2's outputs, then the stage 3 definition;
# stage 3 produces the final qa_outputs logits ('used_by': [-1] marks a model
# output).  The trailing loops rewrite batch dimensions as described above.
'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 1}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([12, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
# Stage 0 of the generated pipeline: the BERT embeddings plus encoder layers
# 0-5 up to (and including) layer 5's attention-output LayerNorm.  The
# forward() body is a flattened trace of the original BERT modules, so its
# statement order must not be changed by hand.  self.lookup maps the generated
# child names (l_0, l_1, ...) back to the original parameter scopes so that
# state_dict keys stay compatible with the unpartitioned model.
class Partition0(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]',
# scopes continue: rest of encoder layer 0's output block, then layer 1
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
# scopes continue: encoder layer 2, then the start of layer 3
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
# scopes continue: rest of encoder layer 3, then the start of layer 4
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
# scopes continue: rest of encoder layer 4, then the start of layer 5
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
# last scope: stage 0 stops at layer 5's attention-output LayerNorm.  After
# the list: TENSORS holds stand-alone tensors (none for this stage); __init__
# registers each scope's module as l_<idx> and builds self.lookup
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query',
# lookup continues: generated child name -> original module scope
'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm', 'l_41': 'bert.encoder.3.attention.self.query', 'l_42': 'bert.encoder.3.attention.self.key', 'l_43': 'bert.encoder.3.attention.self.value', 'l_44': 'bert.encoder.3.attention.self.softmax', 'l_45': 'bert.encoder.3.attention.self.dropout', 'l_46': 'bert.encoder.3.attention.output.dense', 'l_47': 'bert.encoder.3.attention.output.dropout', 'l_48': 'bert.encoder.3.attention.output.LayerNorm', 'l_49': 'bert.encoder.3.intermediate.dense', 'l_50': 'bert.encoder.3.output.dense', 'l_51': 'bert.encoder.3.output.dropout', 'l_52': 'bert.encoder.3.output.LayerNorm', 'l_53': 'bert.encoder.4.attention.self.query', 'l_54': 'bert.encoder.4.attention.self.key', 'l_55': 'bert.encoder.4.attention.self.value', 'l_56': 'bert.encoder.4.attention.self.softmax', 'l_57': 'bert.encoder.4.attention.self.dropout', 'l_58': 'bert.encoder.4.attention.output.dense', 'l_59': 'bert.encoder.4.attention.output.dropout', 'l_60': 'bert.encoder.4.attention.output.LayerNorm', 'l_61': 'bert.encoder.4.intermediate.dense', 'l_62': 'bert.encoder.4.output.dense', 'l_63': 'bert.encoder.4.output.dropout', 'l_64': 'bert.encoder.4.output.LayerNorm', 'l_65': 'bert.encoder.5.attention.self.query', 'l_66': 'bert.encoder.5.attention.self.key', 'l_67': 'bert.encoder.5.attention.self.value', 'l_68': 'bert.encoder.5.attention.self.softmax', 'l_69': 'bert.encoder.5.attention.self.dropout', 'l_70': 'bert.encoder.5.attention.output.dense', 'l_71': 'bert.encoder.5.attention.output.dropout', 'l_72':
# lookup ends here; what follows is the flattened, traced forward() — do not
# reorder these statements by hand
'bert.encoder.5.attention.output.LayerNorm'} self.to(self.device) def forward(self, *args): (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure) t_0 = self.l_0(input_ids) t_1 = self.l_2(token_type_ids) t_2 = input_ids.size(1) t_2 = torch.arange(t_2, dtype=torch.int64, device=self.device) t_2 = t_2.unsqueeze(0) t_2 = t_2.expand_as(input_ids) t_2 = self.l_1(t_2) t_2 = (t_0 + t_2) t_1 = (t_2 + t_1) t_1 = self.l_3(t_1) t_1 = self.l_4(t_1) t_2 = self.l_5(t_1) t_0 = self.l_6(t_1) t_3 = self.l_7(t_1) t_4 = t_2.size() t_5 = t_0.size() t_6 = t_3.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_2.view(t_7, t_8, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_0.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_3.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + attention_mask) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_10(t_4) t_4 = self.l_11(t_4) t_1 = (t_4 + t_1) t_1 = self.l_12(t_1) t_4 = self.l_13(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_14(t_4) t_4 = self.l_15(t_4) t_1 = (t_4 + t_1) t_1 = self.l_16(t_1) t_4 = self.l_17(t_1) t_9 = self.l_18(t_1) t_5 = self.l_19(t_1) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_3 = t_6[0] t_0 = t_6[1] t_2 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_3, t_0, t_2, t_6) t_6 =
t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_2 = t_8[0] t_0 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_2, t_0, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_3 = t_7[0] t_0 = t_7[1] t_2 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_3, t_0, t_2, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / t_6) t_6 = (t_6 + attention_mask) t_6 = self.l_20(t_6) t_6 = self.l_21(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_2 = t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_2, t_6) t_6 = self.l_22(t_6) t_6 = self.l_23(t_6) t_1 = (t_6 + t_1) t_1 = self.l_24(t_1) t_6 = self.l_25(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_26(t_6) t_6 = self.l_27(t_6) t_1 = (t_6 + t_1) t_1 = self.l_28(t_1) t_6 = self.l_29(t_1) t_2 = self.l_30(t_1) t_8 = self.l_31(t_1) t_7 = t_6.size() t_0 = t_2.size() t_3 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_4 = t_0[0] t_9 = t_0[1] t_5 = t_0[2] t_0 = t_0[3] t_0 = t_2.view(t_4, t_9, t_5, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_5 = t_3[0] t_9 = t_3[1] t_4 = t_3[2] t_3 = t_3[3] t_3 = t_8.view(t_5, t_9, t_4, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_0 = t_0.transpose((- 1), (- 2)) t_0 = torch.matmul(t_7, t_0) t_7 = math.sqrt(64) t_7 = (t_0 / t_7) t_7 = (t_7 + attention_mask) t_7 = self.l_32(t_7) t_7 = self.l_33(t_7) t_3 = torch.matmul(t_7, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_3 = t_3.contiguous() t_7 = t_3.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_0 = t_7[0] t_4 = t_7[1] t_7 =
t_7[2] t_7 = t_3.view(t_0, t_4, t_7) t_7 = self.l_34(t_7) t_7 = self.l_35(t_7) t_1 = (t_7 + t_1) t_1 = self.l_36(t_1) t_7 = self.l_37(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_38(t_7) t_7 = self.l_39(t_7) t_1 = (t_7 + t_1) t_1 = self.l_40(t_1) t_7 = self.l_41(t_1) t_4 = self.l_42(t_1) t_0 = self.l_43(t_1) t_3 = t_7.size() t_9 = t_4.size() t_5 = t_0.size() t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_8 = t_3[0] t_2 = t_3[1] t_6 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_8, t_2, t_6, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_6 = t_9[0] t_2 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_6, t_2, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_2 = t_5[1] t_6 = t_5[2] t_5 = t_5[3] t_5 = t_0.view(t_8, t_2, t_6, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_3, t_9) t_3 = math.sqrt(64) t_3 = (t_9 / t_3) t_3 = (t_3 + attention_mask) t_3 = self.l_44(t_3) t_3 = self.l_45(t_3) t_5 = torch.matmul(t_3, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_3 = t_5.size() t_3 = t_3[slice(None, (- 2), None)] t_3 = (t_3 + (1024,)) t_9 = t_3[0] t_6 = t_3[1] t_3 = t_3[2] t_3 = t_5.view(t_9, t_6, t_3) t_3 = self.l_46(t_3) t_3 = self.l_47(t_3) t_1 = (t_3 + t_1) t_1 = self.l_48(t_1) t_3 = self.l_49(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_50(t_3) t_3 = self.l_51(t_3) t_1 = (t_3 + t_1) t_1 = self.l_52(t_1) t_3 = self.l_53(t_1) t_6 = self.l_54(t_1) t_9 = self.l_55(t_1) t_5 = t_3.size() t_2 = t_6.size() t_8 = t_9.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_0 = t_5[0] t_4 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_0, t_4, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_7 = t_2[0] t_4 = t_2[1] t_0 = t_2[2] t_2 = t_2[3] t_2 = t_6.view(t_7, t_4, t_0, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_8 =
t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_0 = t_8[0] t_4 = t_8[1] t_7 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_0, t_4, t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) t_2 = torch.matmul(t_5, t_2) t_5 = math.sqrt(64) t_5 = (t_2 / t_5) t_5 = (t_5 + attention_mask) t_5 = self.l_56(t_5) t_5 = self.l_57(t_5) t_8 = torch.matmul(t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_5 = t_8.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_2 = t_5[0] t_7 = t_5[1] t_5 = t_5[2] t_5 = t_8.view(t_2, t_7, t_5) t_5 = self.l_58(t_5) t_5 = self.l_59(t_5) t_1 = (t_5 + t_1) t_1 = self.l_60(t_1) t_5 = self.l_61(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_62(t_5) t_5 = self.l_63(t_5) t_1 = (t_5 + t_1) t_1 = self.l_64(t_1) t_5 = self.l_65(t_1) t_7 = self.l_66(t_1) t_2 = self.l_67(t_1) t_8 = t_5.size() t_4 = t_7.size() t_0 = t_2.size() t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_9 = t_8[0] t_6 = t_8[1] t_3 = t_8[2] t_8 = t_8[3] t_8 = t_5.view(t_9, t_6, t_3, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_3 = t_4[0] t_6 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_7.view(t_3, t_6, t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_9 = t_0[0] t_6 = t_0[1] t_3 = t_0[2] t_0 = t_0[3] t_0 = t_2.view(t_9, t_6, t_3, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_4 = t_4.transpose((- 1), (- 2)) t_4 = torch.matmul(t_8, t_4) t_8 = math.sqrt(64) t_8 = (t_4 / t_8) t_8 = (t_8 + attention_mask) t_8 = self.l_68(t_8) t_8 = self.l_69(t_8) t_0 = torch.matmul(t_8, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_8 = t_0.size() t_8 = t_8[slice(None, (- 2), None)] t_8 = (t_8 + (1024,)) t_4 = t_8[0] t_3 = t_8[1] t_8 = t_8[2] t_8 = t_0.view(t_4, t_3, t_8) t_8 = self.l_70(t_8) t_8 = self.l_71(t_8) t_1 = (t_8 + t_1) t_1 = self.l_72(t_1) return (t_1,) def state_dict(self, *args, **kwargs): return state_dict(self, *args,
**kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module):
    """Pipeline stage 1 of a partitioned BertForQuestionAnswering model.

    Holds the tail of encoder layer 5 (its feed-forward sub-block), the full
    encoder layers 6-10, and the query/key/value projections of layer 11.
    Scaling, masking and softmax for layer 11's attention are completed by
    the next stage, which receives the raw score tensor from here.
    """

    def _build_layer_scopes():
        # Regenerate the order-sensitive list of 67 traced layer scopes
        # instead of spelling every string out by hand.
        prefix = ('BertForQuestionAnswering/BertModel[bert]/'
                  'BertEncoder[encoder]/BertLayer[{}]/')
        per_layer = [
            'BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
            'BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
            'BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
            'BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
            'BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
            'BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
            'BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
            'BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
            'BertIntermediate[intermediate]/Linear[dense]',
            'BertOutput[output]/Linear[dense]',
            'BertOutput[output]/Dropout[dropout]',
            'BertOutput[output]/LayerNorm[LayerNorm]',
        ]
        # Layer 5 contributes only its feed-forward tail (last 4 scopes),
        # layers 6-10 contribute all 12 scopes, layer 11 only q/k/v.
        scopes = [prefix.format(5) + rel for rel in per_layer[8:]]
        for layer in range(6, 11):
            scopes.extend(prefix.format(layer) + rel for rel in per_layer)
        scopes.extend(prefix.format(11) + rel for rel in per_layer[:3])
        return scopes

    LAYER_SCOPES = _build_layer_scopes()
    del _build_layer_scopes
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        """Register this stage's modules/tensors and move them to *device*.

        layers: dict mapping traced scope name -> nn.Module.
        tensors: dict mapping scope name -> nn.Parameter or buffer tensor.
        device: target device for the whole stage.
        """
        super().__init__()
        for idx, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[scope])
        buf_idx = param_idx = 0
        for scope in self.TENSORS:
            tensor = tensors[scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{param_idx}', tensor)
                param_idx += 1
            else:
                self.register_buffer(f'b_{buf_idx}', tensor)
                buf_idx += 1
        self.device = torch.device(device)
        # Two flat inputs: the attention mask and stage 0's hidden states.
        self.input_structure = [1, 1]
        # Map local module names (l_0..l_66) back to attribute paths in the
        # original (unpartitioned) model, built with the same layout as
        # LAYER_SCOPES above.
        suffixes = ['attention.self.query', 'attention.self.key',
                    'attention.self.value', 'attention.self.softmax',
                    'attention.self.dropout', 'attention.output.dense',
                    'attention.output.dropout', 'attention.output.LayerNorm',
                    'intermediate.dense', 'output.dense', 'output.dropout',
                    'output.LayerNorm']
        originals = [f'bert.encoder.5.{s}' for s in suffixes[8:]]
        for layer in range(6, 11):
            originals.extend(f'bert.encoder.{layer}.{s}' for s in suffixes)
        originals.extend(f'bert.encoder.11.{s}' for s in suffixes[:3])
        self.lookup = {f'l_{i}': name for i, name in enumerate(originals)}
        self.to(self.device)

    def forward(self, *args):
        """Run layers 5(tail)..11(q/k/v); return (hidden, value, raw scores)."""
        attention_mask, hidden = unflatten(args, self.input_structure)

        def split_heads(module, states):
            # (B, S, 1024) -> (B, 16 heads, S, 64), mirroring the traced
            # size[:-1] + (16, 64) view followed by permute.
            proj = module(states)
            shape = proj.size()[:-1] + (16, 64)
            return proj.view(*shape).permute(0, 2, 1, 3)

        def merge_heads(ctx):
            # (B, 16, S, 64) -> (B, S, 1024), mirroring the traced reshape.
            ctx = ctx.permute(0, 2, 1, 3).contiguous()
            return ctx.view(*(ctx.size()[:-2] + (1024,)))

        def ffn(inter, dense, drop, norm, states):
            # BERT feed-forward sub-block with residual + LayerNorm.
            out = drop(dense(torch.nn.functional.gelu(inter(states))))
            return norm(out + states)

        # Tail of layer 5: only its feed-forward sub-block lives in this stage.
        hidden = ffn(self.l_0, self.l_1, self.l_2, self.l_3, hidden)

        # Full encoder layers 6..10: twelve traced modules each, l_4..l_63.
        for base in range(4, 64, 12):
            (query, key, value, softmax, attn_drop, attn_dense, out_drop,
             attn_norm, inter, ffn_dense, ffn_drop, ffn_norm) = (
                getattr(self, f'l_{base + off}') for off in range(12))
            q = split_heads(query, hidden)
            k = split_heads(key, hidden)
            v = split_heads(value, hidden)
            # Scaled dot-product attention; head dim is 64, hence sqrt(64).
            scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
            probs = attn_drop(softmax(scores + attention_mask))
            attn_out = out_drop(attn_dense(merge_heads(torch.matmul(probs, v))))
            hidden = attn_norm(attn_out + hidden)
            hidden = ffn(inter, ffn_dense, ffn_drop, ffn_norm, hidden)

        # Head of layer 11: q/k/v projections and the *unscaled, unmasked*
        # score tensor; the next stage divides by sqrt(64), adds the mask and
        # applies softmax.
        q = split_heads(self.l_64, hidden)
        k = split_heads(self.l_65, hidden)
        v = split_heads(self.l_66, hidden)
        scores = torch.matmul(q, k.transpose(-1, -2))
        return list(flatten((hidden, v, scores)))

    # Delegating wrappers so the pipeline's module-level helpers (which
    # rewrite key names via self.lookup / track self.device) are used in
    # place of nn.Module's defaults.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'bert.encoder.11.attention.self.softmax', 'l_1': 'bert.encoder.11.attention.self.dropout', 'l_2': 
'bert.encoder.11.attention.output.dense', 'l_3': 'bert.encoder.11.attention.output.dropout', 'l_4': 'bert.encoder.11.attention.output.LayerNorm', 'l_5': 'bert.encoder.11.intermediate.dense', 'l_6': 'bert.encoder.11.output.dense', 'l_7': 'bert.encoder.11.output.dropout', 'l_8': 'bert.encoder.11.output.LayerNorm', 'l_9': 'bert.encoder.12.attention.self.query', 'l_10': 'bert.encoder.12.attention.self.key', 'l_11': 'bert.encoder.12.attention.self.value', 'l_12': 'bert.encoder.12.attention.self.softmax', 'l_13': 'bert.encoder.12.attention.self.dropout', 'l_14': 'bert.encoder.12.attention.output.dense', 'l_15': 'bert.encoder.12.attention.output.dropout', 'l_16': 'bert.encoder.12.attention.output.LayerNorm', 'l_17': 'bert.encoder.12.intermediate.dense', 'l_18': 'bert.encoder.12.output.dense', 'l_19': 'bert.encoder.12.output.dropout', 'l_20': 'bert.encoder.12.output.LayerNorm', 'l_21': 'bert.encoder.13.attention.self.query', 'l_22': 'bert.encoder.13.attention.self.key', 'l_23': 'bert.encoder.13.attention.self.value', 'l_24': 'bert.encoder.13.attention.self.softmax', 'l_25': 'bert.encoder.13.attention.self.dropout', 'l_26': 'bert.encoder.13.attention.output.dense', 'l_27': 'bert.encoder.13.attention.output.dropout', 'l_28': 'bert.encoder.13.attention.output.LayerNorm', 'l_29': 'bert.encoder.13.intermediate.dense', 'l_30': 'bert.encoder.13.output.dense', 'l_31': 'bert.encoder.13.output.dropout', 'l_32': 'bert.encoder.13.output.LayerNorm', 'l_33': 'bert.encoder.14.attention.self.query', 'l_34': 'bert.encoder.14.attention.self.key', 'l_35': 'bert.encoder.14.attention.self.value', 'l_36': 'bert.encoder.14.attention.self.softmax', 'l_37': 'bert.encoder.14.attention.self.dropout', 'l_38': 'bert.encoder.14.attention.output.dense', 'l_39': 'bert.encoder.14.attention.output.dropout', 'l_40': 'bert.encoder.14.attention.output.LayerNorm', 'l_41': 'bert.encoder.14.intermediate.dense', 'l_42': 'bert.encoder.14.output.dense', 'l_43': 'bert.encoder.14.output.dropout', 'l_44': 
'bert.encoder.14.output.LayerNorm', 'l_45': 'bert.encoder.15.attention.self.query', 'l_46': 'bert.encoder.15.attention.self.key', 'l_47': 'bert.encoder.15.attention.self.value', 'l_48': 'bert.encoder.15.attention.self.softmax', 'l_49': 'bert.encoder.15.attention.self.dropout', 'l_50': 'bert.encoder.15.attention.output.dense', 'l_51': 'bert.encoder.15.attention.output.dropout', 'l_52': 'bert.encoder.15.attention.output.LayerNorm', 'l_53': 'bert.encoder.15.intermediate.dense', 'l_54': 'bert.encoder.15.output.dense', 'l_55': 'bert.encoder.15.output.dropout', 'l_56': 'bert.encoder.15.output.LayerNorm', 'l_57': 'bert.encoder.16.attention.self.query', 'l_58': 'bert.encoder.16.attention.self.key', 'l_59': 'bert.encoder.16.attention.self.value', 'l_60': 'bert.encoder.16.attention.self.softmax', 'l_61': 'bert.encoder.16.attention.self.dropout', 'l_62': 'bert.encoder.16.attention.output.dense', 'l_63': 'bert.encoder.16.attention.output.dropout', 'l_64': 'bert.encoder.16.attention.output.LayerNorm', 'l_65': 'bert.encoder.16.intermediate.dense', 'l_66': 'bert.encoder.16.output.dense'} self.to(self.device) def forward(self, *args): (attention_mask, x0, x1, x2) = unflatten(args, self.input_structure) t_0 = math.sqrt(64) t_0 = (x2 / t_0) t_0 = (t_0 + attention_mask) t_0 = self.l_0(t_0) t_0 = self.l_1(t_0) t_0 = torch.matmul(t_0, x1) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_1 = t_0.size() t_1 = t_1[slice(None, (- 2), None)] t_1 = (t_1 + (1024,)) t_2 = t_1[0] t_3 = t_1[1] t_1 = t_1[2] t_1 = t_0.view(t_2, t_3, t_1) t_1 = self.l_2(t_1) t_1 = self.l_3(t_1) t_1 = (t_1 + x0) t_1 = self.l_4(t_1) t_3 = self.l_5(t_1) t_3 = torch.nn.functional.gelu(t_3) t_3 = self.l_6(t_3) t_3 = self.l_7(t_3) t_1 = (t_3 + t_1) t_1 = self.l_8(t_1) t_3 = self.l_9(t_1) t_2 = self.l_10(t_1) t_0 = self.l_11(t_1) t_4 = t_3.size() t_5 = t_2.size() t_6 = t_0.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_7 = t_4[0] t_8 = t_4[1] t_9 = t_4[2] t_4 = t_4[3] t_4 = t_3.view(t_7, t_8, 
t_9, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_9, t_8, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_7 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_0.view(t_7, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_4, t_5) t_4 = math.sqrt(64) t_4 = (t_5 / t_4) t_4 = (t_4 + attention_mask) t_4 = self.l_12(t_4) t_4 = self.l_13(t_4) t_6 = torch.matmul(t_4, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_6 = t_6.contiguous() t_4 = t_6.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_5 = t_4[0] t_9 = t_4[1] t_4 = t_4[2] t_4 = t_6.view(t_5, t_9, t_4) t_4 = self.l_14(t_4) t_4 = self.l_15(t_4) t_1 = (t_4 + t_1) t_1 = self.l_16(t_1) t_4 = self.l_17(t_1) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_18(t_4) t_4 = self.l_19(t_4) t_1 = (t_4 + t_1) t_1 = self.l_20(t_1) t_4 = self.l_21(t_1) t_9 = self.l_22(t_1) t_5 = self.l_23(t_1) t_6 = t_4.size() t_8 = t_9.size() t_7 = t_5.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_2 = t_6[1] t_3 = t_6[2] t_6 = t_6[3] t_6 = t_4.view(t_0, t_2, t_3, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_3 = t_8[0] t_2 = t_8[1] t_0 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_3, t_2, t_0, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_0 = t_7[0] t_2 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_5.view(t_0, t_2, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_6, t_8) t_6 = math.sqrt(64) t_6 = (t_8 / t_6) t_6 = (t_6 + attention_mask) t_6 = self.l_24(t_6) t_6 = self.l_25(t_6) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_7 = t_7.contiguous() t_6 = t_7.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_8 = t_6[0] t_3 = 
t_6[1] t_6 = t_6[2] t_6 = t_7.view(t_8, t_3, t_6) t_6 = self.l_26(t_6) t_6 = self.l_27(t_6) t_1 = (t_6 + t_1) t_1 = self.l_28(t_1) t_6 = self.l_29(t_1) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_30(t_6) t_6 = self.l_31(t_6) t_1 = (t_6 + t_1) t_1 = self.l_32(t_1) t_6 = self.l_33(t_1) t_3 = self.l_34(t_1) t_8 = self.l_35(t_1) t_7 = t_6.size() t_2 = t_3.size() t_0 = t_8.size() t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_5 = t_7[0] t_9 = t_7[1] t_4 = t_7[2] t_7 = t_7[3] t_7 = t_6.view(t_5, t_9, t_4, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_4 = t_2[0] t_9 = t_2[1] t_5 = t_2[2] t_2 = t_2[3] t_2 = t_3.view(t_4, t_9, t_5, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_9 = t_0[1] t_4 = t_0[2] t_0 = t_0[3] t_0 = t_8.view(t_5, t_9, t_4, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_2 = t_2.transpose((- 1), (- 2)) t_2 = torch.matmul(t_7, t_2) t_7 = math.sqrt(64) t_7 = (t_2 / t_7) t_7 = (t_7 + attention_mask) t_7 = self.l_36(t_7) t_7 = self.l_37(t_7) t_0 = torch.matmul(t_7, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_7 = t_0.size() t_7 = t_7[slice(None, (- 2), None)] t_7 = (t_7 + (1024,)) t_2 = t_7[0] t_4 = t_7[1] t_7 = t_7[2] t_7 = t_0.view(t_2, t_4, t_7) t_7 = self.l_38(t_7) t_7 = self.l_39(t_7) t_1 = (t_7 + t_1) t_1 = self.l_40(t_1) t_7 = self.l_41(t_1) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_42(t_7) t_7 = self.l_43(t_7) t_1 = (t_7 + t_1) t_1 = self.l_44(t_1) t_7 = self.l_45(t_1) t_4 = self.l_46(t_1) t_2 = self.l_47(t_1) t_0 = t_7.size() t_9 = t_4.size() t_5 = t_2.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_8 = t_0[0] t_3 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_7.view(t_8, t_3, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_6 = t_9[0] t_3 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_4.view(t_6, t_3, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = 
t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_3 = t_5[1] t_6 = t_5[2] t_5 = t_5[3] t_5 = t_2.view(t_8, t_3, t_6, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_0, t_9) t_0 = math.sqrt(64) t_0 = (t_9 / t_0) t_0 = (t_0 + attention_mask) t_0 = self.l_48(t_0) t_0 = self.l_49(t_0) t_5 = torch.matmul(t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_0 = t_5.size() t_0 = t_0[slice(None, (- 2), None)] t_0 = (t_0 + (1024,)) t_9 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_5.view(t_9, t_6, t_0) t_0 = self.l_50(t_0) t_0 = self.l_51(t_0) t_1 = (t_0 + t_1) t_1 = self.l_52(t_1) t_0 = self.l_53(t_1) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_54(t_0) t_0 = self.l_55(t_0) t_1 = (t_0 + t_1) t_1 = self.l_56(t_1) t_0 = self.l_57(t_1) t_6 = self.l_58(t_1) t_9 = self.l_59(t_1) t_5 = t_0.size() t_3 = t_6.size() t_8 = t_9.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_2 = t_5[0] t_4 = t_5[1] t_7 = t_5[2] t_5 = t_5[3] t_5 = t_0.view(t_2, t_4, t_7, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_7 = t_3[0] t_4 = t_3[1] t_2 = t_3[2] t_3 = t_3[3] t_3 = t_6.view(t_7, t_4, t_2, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_2 = t_8[0] t_4 = t_8[1] t_7 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_2, t_4, t_7, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_5, t_3) t_5 = math.sqrt(64) t_5 = (t_3 / t_5) t_5 = (t_5 + attention_mask) t_5 = self.l_60(t_5) t_5 = self.l_61(t_5) t_8 = torch.matmul(t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_5 = t_8.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_3 = t_5[0] t_7 = t_5[1] t_5 = t_5[2] t_5 = t_8.view(t_3, t_7, t_5) t_5 = self.l_62(t_5) t_5 = self.l_63(t_5) t_1 = (t_5 + t_1) t_1 = self.l_64(t_1) t_5 = self.l_65(t_1) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_66(t_5) return 
list(flatten((t_1, t_5))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module):
    """Pipeline stage 3 of a partitioned BertForQuestionAnswering model.

    Owns the tail of encoder layer 16 (its output dropout + LayerNorm),
    encoder layers 17-23 in full, the pooler, and the final qa_outputs
    head.  The view constants in ``forward`` (16 heads x 64 dims, hidden
    size 1024) match a bert-large configuration — presumably bert-large;
    TODO confirm against the checkpoint.

    ``layers``/``tensors`` are scope-name -> module/tensor mappings
    (see ``layerDict`` / ``traverse_params_buffs`` in this file).
    """

    # Scope names of the sub-modules this stage owns, in execution order.
    # Index i in this list becomes attribute ``l_{i}`` on the instance.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]',
        'BertForQuestionAnswering/Linear[qa_outputs]']
    # This stage owns no free-standing parameters or buffers.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        super().__init__()
        # Attach owned sub-modules as l_0 .. l_88, in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register any free-standing tensors: parameters as p_{i}, buffers as b_{i}.
        # (Empty for this stage since TENSORS is [].)
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward() receives 3 flat inputs; unflatten() regroups them by this spec.
        self.input_structure = [1, 1, 1]
        # Maps local attribute names back to the original model's state-dict prefixes
        # (presumably used by the state_dict/load_state_dict helpers — see module level).
        self.lookup = {
            'l_0': 'bert.encoder.16.output.dropout', 'l_1': 'bert.encoder.16.output.LayerNorm',
            'l_2': 'bert.encoder.17.attention.self.query', 'l_3': 'bert.encoder.17.attention.self.key',
            'l_4': 'bert.encoder.17.attention.self.value', 'l_5': 'bert.encoder.17.attention.self.softmax',
            'l_6': 'bert.encoder.17.attention.self.dropout', 'l_7': 'bert.encoder.17.attention.output.dense',
            'l_8': 'bert.encoder.17.attention.output.dropout', 'l_9': 'bert.encoder.17.attention.output.LayerNorm',
            'l_10': 'bert.encoder.17.intermediate.dense', 'l_11': 'bert.encoder.17.output.dense',
            'l_12': 'bert.encoder.17.output.dropout', 'l_13': 'bert.encoder.17.output.LayerNorm',
            'l_14': 'bert.encoder.18.attention.self.query', 'l_15': 'bert.encoder.18.attention.self.key',
            'l_16': 'bert.encoder.18.attention.self.value', 'l_17': 'bert.encoder.18.attention.self.softmax',
            'l_18': 'bert.encoder.18.attention.self.dropout', 'l_19': 'bert.encoder.18.attention.output.dense',
            'l_20': 'bert.encoder.18.attention.output.dropout', 'l_21': 'bert.encoder.18.attention.output.LayerNorm',
            'l_22': 'bert.encoder.18.intermediate.dense', 'l_23': 'bert.encoder.18.output.dense',
            'l_24': 'bert.encoder.18.output.dropout', 'l_25': 'bert.encoder.18.output.LayerNorm',
            'l_26': 'bert.encoder.19.attention.self.query', 'l_27': 'bert.encoder.19.attention.self.key',
            'l_28': 'bert.encoder.19.attention.self.value', 'l_29': 'bert.encoder.19.attention.self.softmax',
            'l_30': 'bert.encoder.19.attention.self.dropout', 'l_31': 'bert.encoder.19.attention.output.dense',
            'l_32': 'bert.encoder.19.attention.output.dropout', 'l_33': 'bert.encoder.19.attention.output.LayerNorm',
            'l_34': 'bert.encoder.19.intermediate.dense', 'l_35': 'bert.encoder.19.output.dense',
            'l_36': 'bert.encoder.19.output.dropout', 'l_37': 'bert.encoder.19.output.LayerNorm',
            'l_38': 'bert.encoder.20.attention.self.query', 'l_39': 'bert.encoder.20.attention.self.key',
            'l_40': 'bert.encoder.20.attention.self.value', 'l_41': 'bert.encoder.20.attention.self.softmax',
            'l_42': 'bert.encoder.20.attention.self.dropout', 'l_43': 'bert.encoder.20.attention.output.dense',
            'l_44': 'bert.encoder.20.attention.output.dropout', 'l_45': 'bert.encoder.20.attention.output.LayerNorm',
            'l_46': 'bert.encoder.20.intermediate.dense', 'l_47': 'bert.encoder.20.output.dense',
            'l_48': 'bert.encoder.20.output.dropout', 'l_49': 'bert.encoder.20.output.LayerNorm',
            'l_50': 'bert.encoder.21.attention.self.query', 'l_51': 'bert.encoder.21.attention.self.key',
            'l_52': 'bert.encoder.21.attention.self.value', 'l_53': 'bert.encoder.21.attention.self.softmax',
            'l_54': 'bert.encoder.21.attention.self.dropout', 'l_55': 'bert.encoder.21.attention.output.dense',
            'l_56': 'bert.encoder.21.attention.output.dropout', 'l_57': 'bert.encoder.21.attention.output.LayerNorm',
            'l_58': 'bert.encoder.21.intermediate.dense', 'l_59': 'bert.encoder.21.output.dense',
            'l_60': 'bert.encoder.21.output.dropout', 'l_61': 'bert.encoder.21.output.LayerNorm',
            'l_62': 'bert.encoder.22.attention.self.query', 'l_63': 'bert.encoder.22.attention.self.key',
            'l_64': 'bert.encoder.22.attention.self.value', 'l_65': 'bert.encoder.22.attention.self.softmax',
            'l_66': 'bert.encoder.22.attention.self.dropout', 'l_67': 'bert.encoder.22.attention.output.dense',
            'l_68': 'bert.encoder.22.attention.output.dropout', 'l_69': 'bert.encoder.22.attention.output.LayerNorm',
            'l_70': 'bert.encoder.22.intermediate.dense', 'l_71': 'bert.encoder.22.output.dense',
            'l_72': 'bert.encoder.22.output.dropout', 'l_73': 'bert.encoder.22.output.LayerNorm',
            'l_74': 'bert.encoder.23.attention.self.query', 'l_75': 'bert.encoder.23.attention.self.key',
            'l_76': 'bert.encoder.23.attention.self.value', 'l_77': 'bert.encoder.23.attention.self.softmax',
            'l_78': 'bert.encoder.23.attention.self.dropout', 'l_79': 'bert.encoder.23.attention.output.dense',
            'l_80': 'bert.encoder.23.attention.output.dropout', 'l_81': 'bert.encoder.23.attention.output.LayerNorm',
            'l_82': 'bert.encoder.23.intermediate.dense', 'l_83': 'bert.encoder.23.output.dense',
            'l_84': 'bert.encoder.23.output.dropout', 'l_85': 'bert.encoder.23.output.LayerNorm',
            'l_86': 'bert.pooler.dense', 'l_87': 'bert.pooler.activation', 'l_88': 'qa_outputs'}
        self.to(self.device)

    def forward(self, *args):
        """Run layers 17-23 + heads on activations received from stage 2.

        Generated code: t_0..t_9 are reused like registers, so statement
        order is load-bearing — do not reorder.  Inputs (after unflatten):
          attention_mask — additive attention mask broadcast over heads
          x0 — residual input to layer 16's output LayerNorm
          x1 — layer 16's output-dense activations (pre-dropout)
        Returns a 1-tuple with the qa_outputs logits.  The pooler output
        is computed at the end but intentionally not returned (matches the
        stage's declared single output).
        """
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # Finish layer 16: output dropout + residual + LayerNorm.
        t_0 = self.l_0(x1)
        t_0 = (t_0 + x0)
        t_0 = self.l_1(t_0)
        # --- Layer 17 self-attention (q/k/v -> scores -> context) ---
        t_1 = self.l_2(t_0)
        t_2 = self.l_3(t_0)
        t_3 = self.l_4(t_0)
        t_4 = t_1.size()
        t_5 = t_2.size()
        t_6 = t_3.size()
        # Reshape each projection to (batch, heads=16, seq, head_dim=64).
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        # Scaled dot-product: q @ k^T / sqrt(64), masked, softmax, dropout.
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_5(t_4)
        t_4 = self.l_6(t_4)
        t_6 = torch.matmul(t_4, t_6)
        # Merge heads back to (batch, seq, 1024).
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # Layer 17 attention output + residual, then FFN (gelu) + residual.
        t_4 = self.l_7(t_4)
        t_4 = self.l_8(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_9(t_0)
        t_4 = self.l_10(t_0)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_11(t_4)
        t_4 = self.l_12(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_13(t_0)
        # --- Layer 18 (same structure as layer 17) ---
        t_4 = self.l_14(t_0)
        t_9 = self.l_15(t_0)
        t_5 = self.l_16(t_0)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_2 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_2, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_2 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_1, t_2, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_2 = t_7[1]
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_2, t_1, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_17(t_6)
        t_6 = self.l_18(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_1, t_6)
        t_6 = self.l_19(t_6)
        t_6 = self.l_20(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_21(t_0)
        t_6 = self.l_22(t_0)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_23(t_6)
        t_6 = self.l_24(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_25(t_0)
        # --- Layer 19 ---
        t_6 = self.l_26(t_0)
        t_1 = self.l_27(t_0)
        t_8 = self.l_28(t_0)
        t_7 = t_6.size()
        t_2 = t_1.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_4 = t_2[0]
        t_9 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_4, t_9, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_7, t_2)
        t_7 = math.sqrt(64)
        t_7 = (t_2 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_29(t_7)
        t_7 = self.l_30(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_2 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_2, t_4, t_7)
        t_7 = self.l_31(t_7)
        t_7 = self.l_32(t_7)
        t_0 = (t_7 + t_0)
        t_0 = self.l_33(t_0)
        t_7 = self.l_34(t_0)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_35(t_7)
        t_7 = self.l_36(t_7)
        t_0 = (t_7 + t_0)
        t_0 = self.l_37(t_0)
        # --- Layer 20 ---
        t_7 = self.l_38(t_0)
        t_4 = self.l_39(t_0)
        t_2 = self.l_40(t_0)
        t_3 = t_7.size()
        t_9 = t_4.size()
        t_5 = t_2.size()
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_8 = t_3[0]
        t_1 = t_3[1]
        t_6 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_8, t_1, t_6, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_6 = t_9[0]
        t_1 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_4.view(t_6, t_1, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_8 = t_5[0]
        t_1 = t_5[1]
        t_6 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_8, t_1, t_6, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_3, t_9)
        t_3 = math.sqrt(64)
        t_3 = (t_9 / t_3)
        t_3 = (t_3 + attention_mask)
        t_3 = self.l_41(t_3)
        t_3 = self.l_42(t_3)
        t_5 = torch.matmul(t_3, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_3 = t_5.size()
        t_3 = t_3[slice(None, (- 2), None)]
        t_3 = (t_3 + (1024,))
        t_9 = t_3[0]
        t_6 = t_3[1]
        t_3 = t_3[2]
        t_3 = t_5.view(t_9, t_6, t_3)
        t_3 = self.l_43(t_3)
        t_3 = self.l_44(t_3)
        t_0 = (t_3 + t_0)
        t_0 = self.l_45(t_0)
        t_3 = self.l_46(t_0)
        t_3 = torch.nn.functional.gelu(t_3)
        t_3 = self.l_47(t_3)
        t_3 = self.l_48(t_3)
        t_0 = (t_3 + t_0)
        t_0 = self.l_49(t_0)
        # --- Layer 21 ---
        t_3 = self.l_50(t_0)
        t_6 = self.l_51(t_0)
        t_9 = self.l_52(t_0)
        t_5 = t_3.size()
        t_1 = t_6.size()
        t_8 = t_9.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_2 = t_5[0]
        t_4 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_2, t_4, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_7 = t_1[0]
        t_4 = t_1[1]
        t_2 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_6.view(t_7, t_4, t_2, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_2 = t_8[0]
        t_4 = t_8[1]
        t_7 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_2, t_4, t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_1 = t_1.transpose((- 1), (- 2))
        t_1 = torch.matmul(t_5, t_1)
        t_5 = math.sqrt(64)
        t_5 = (t_1 / t_5)
        t_5 = (t_5 + attention_mask)
        t_5 = self.l_53(t_5)
        t_5 = self.l_54(t_5)
        t_8 = torch.matmul(t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_5 = t_8.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_1 = t_5[0]
        t_7 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_8.view(t_1, t_7, t_5)
        t_5 = self.l_55(t_5)
        t_5 = self.l_56(t_5)
        t_0 = (t_5 + t_0)
        t_0 = self.l_57(t_0)
        t_5 = self.l_58(t_0)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_59(t_5)
        t_5 = self.l_60(t_5)
        t_0 = (t_5 + t_0)
        t_0 = self.l_61(t_0)
        # --- Layer 22 ---
        t_5 = self.l_62(t_0)
        t_7 = self.l_63(t_0)
        t_1 = self.l_64(t_0)
        t_8 = t_5.size()
        t_4 = t_7.size()
        t_2 = t_1.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_9 = t_8[0]
        t_6 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_5.view(t_9, t_6, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_3 = t_4[0]
        t_6 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_7.view(t_3, t_6, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_9 = t_2[0]
        t_6 = t_2[1]
        t_3 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_9, t_6, t_3, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_4 = t_4.transpose((- 1), (- 2))
        t_4 = torch.matmul(t_8, t_4)
        t_8 = math.sqrt(64)
        t_8 = (t_4 / t_8)
        t_8 = (t_8 + attention_mask)
        t_8 = self.l_65(t_8)
        t_8 = self.l_66(t_8)
        t_2 = torch.matmul(t_8, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_2 = t_2.contiguous()
        t_8 = t_2.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (1024,))
        t_4 = t_8[0]
        t_3 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_2.view(t_4, t_3, t_8)
        t_8 = self.l_67(t_8)
        t_8 = self.l_68(t_8)
        t_0 = (t_8 + t_0)
        t_0 = self.l_69(t_0)
        t_8 = self.l_70(t_0)
        t_8 = torch.nn.functional.gelu(t_8)
        t_8 = self.l_71(t_8)
        t_8 = self.l_72(t_8)
        t_0 = (t_8 + t_0)
        t_0 = self.l_73(t_0)
        # --- Layer 23 ---
        t_8 = self.l_74(t_0)
        t_3 = self.l_75(t_0)
        t_4 = self.l_76(t_0)
        t_2 = t_8.size()
        t_6 = t_3.size()
        t_9 = t_4.size()
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_1 = t_2[0]
        t_7 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_8.view(t_1, t_7, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_5 = t_6[0]
        t_7 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_5, t_7, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_1 = t_9[0]
        t_7 = t_9[1]
        t_5 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_4.view(t_1, t_7, t_5, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_2, t_6)
        t_2 = math.sqrt(64)
        t_2 = (t_6 / t_2)
        t_2 = (t_2 + attention_mask)
        t_2 = self.l_77(t_2)
        t_2 = self.l_78(t_2)
        t_9 = torch.matmul(t_2, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_9 = t_9.contiguous()
        t_2 = t_9.size()
        t_2 = t_2[slice(None, (- 2), None)]
        t_2 = (t_2 + (1024,))
        t_6 = t_2[0]
        t_5 = t_2[1]
        t_2 = t_2[2]
        t_2 = t_9.view(t_6, t_5, t_2)
        t_2 = self.l_79(t_2)
        t_2 = self.l_80(t_2)
        t_0 = (t_2 + t_0)
        t_0 = self.l_81(t_0)
        t_2 = self.l_82(t_0)
        t_2 = torch.nn.functional.gelu(t_2)
        t_2 = self.l_83(t_2)
        t_2 = self.l_84(t_2)
        t_0 = (t_2 + t_0)
        t_0 = self.l_85(t_0)
        # --- Heads: qa_outputs logits; pooler over the [CLS] token ---
        t_2 = self.l_88(t_0)
        # Pooler path: take position 0, dense + tanh.  Its result is
        # computed but discarded — only the QA logits are returned.
        t_0 = t_0[(slice(None, None, None), 0)]
        t_0 = self.l_86(t_0)
        t_0 = self.l_87(t_0)
        return (t_2,)

    # nn.Module-protocol overrides, delegated to the partition-aware helpers
    # defined at module level in this file (cuda/to are visible above;
    # state_dict & friends are presumably siblings — defined elsewhere here).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Walk the module tree, yielding (layer, scope, parent) for every visited layer.

    A child is treated as terminal (not descended into) when it has no children
    of its own, is an instance of one of *basic_blocks*, or *depth* has been
    exhausted.  With ``full=True`` a fourth element is yielded as well: True for
    terminal layers, False for containers that are about to be descended into.
    """
    if prefix is None:
        prefix = type(module).__name__
    for child_name, child in module.named_children():
        child_scope = f'{prefix}/{type(child).__name__}[{child_name}]'
        terminal = (depth == 0
                    or not list(child.children())
                    or isinstance(child, tuple(basic_blocks)))
        if terminal:
            yield (child, child_scope, module, True) if full else (child, child_scope, module)
        else:
            if full:
                yield (child, child_scope, module, False)
            yield from traverse_model(child, depth - 1, child_scope, basic_blocks, full)
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map scope name -> layer for every layer reached by traverse_model."""
    mapping = {}
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Yield (tensor, scope) for every parameter and buffer in the module tree.

    Direct parameters are emitted first, then direct buffers, then the
    children are visited recursively with an extended scope prefix.
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, param in module.named_parameters(recurse=False):
        yield (param, f'{prefix}/{type(param).__name__}[{name}]')
    for name, buf in module.named_buffers(recurse=False):
        yield (buf, f'{prefix}/{type(buf).__name__}[{name}]')
    for child_name, child in module.named_children():
        child_prefix = f'{prefix}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_prefix)
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Ordered mapping scope name -> tensor over all params/buffers of *model*."""
    pairs = ((scope, tensor) for (tensor, scope) in traverse_params_buffs(model))
    return collections.OrderedDict(pairs)
def move_tensors(ts, device):
    """Recursively move every Tensor / nn.Module inside *ts* to *device*.

    Non-movable leaves are passed through unchanged.
    """
    def _move_one(item):
        return item.to(device) if isinstance(item, (nn.Module, Tensor)) else item
    return nested_map(_move_one, ts)
def nested_map(func, ts, full=False):
    """Apply *func* to every leaf of a nested list/tuple/set/dict structure.

    torch.Size is deliberately treated as a single leaf (not as a tuple of
    ints).  With ``full=True`` slices are descended into as well, mapping
    their start/stop/step; otherwise a slice is a leaf.
    """
    if isinstance(ts, torch.Size):
        return func(ts)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for key, value in ts.items()}
    if isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, item, full=full) for item in ts)
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
def flatten(ts):
    """Depth-first iterator over the leaves of a nested structure.

    Dict values are visited in sorted-key order; torch.Size is yielded as a
    single leaf.  Inverse of unflatten given the same structure.
    """
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=lambda kv: kv[0]):
            yield from flatten(value)
    else:
        yield ts
def unflatten(xs, structure):
    """Rebuild the nested *structure* from the flat sequence *xs* (inverse of flatten)."""
    value, _consumed = _unflatten(xs, structure)
    return value
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs):
    """state_dict with generated keys translated back to original model names.

    The partition stores its layers under generated names (l_*, p_*, b_*);
    ``partition.lookup`` maps a generated name to the original state-dict
    name, so checkpoints stay interchangeable with the unpartitioned model.
    """
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
        else:
            # generated prefix followed by the tensor's own suffix, e.g. 'l_0.weight'
            assert '.' in key
            head, _, tail = key.partition('.')
            translated[f'{lookup[head]}.{tail}'] = value
    return translated
def load_state_dict(partition, state_dict, strict=True):
    """Load a checkpoint keyed by original model names into the partition.

    Inverse of the module-level ``state_dict``: original names are mapped
    back to the generated l_*/p_*/b_* names via ``partition.lookup`` and the
    tensors are moved to ``partition.device`` before loading.
    """
    to_generated = {original: generated for generated, original in partition.lookup.items()}
    device = partition.device
    translated = dict()
    # iterate the partition's own (already translated) keys so only tensors
    # this stage owns are pulled out of the incoming checkpoint
    for key in list(partition.state_dict(None).keys()):
        if key in to_generated:
            translated[to_generated[key]] = state_dict[key].to(device)
            continue
        dot = key.rfind('.')
        prefix = key[:dot]
        if prefix in to_generated:
            translated[to_generated[prefix] + key[dot:]] = state_dict[key].to(device)
    nn.Module.load_state_dict(partition, translated, strict=strict)
def named_buffers(partition, prefix='', recurse=True):
    """named_buffers with generated b_* keys mapped back to original names via partition.lookup."""
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], buf)
        else:
            # generated prefix plus the buffer's own suffix
            assert '.' in key
            dot = key.find('.')
            yield (lookup[key[:dot]] + key[dot:], buf)
def named_parameters(partition, prefix='', recurse=True):
    """named_parameters with generated l_*/p_* keys mapped back to original names via partition.lookup."""
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield (lookup[key], param)
        else:
            # generated prefix plus the parameter's own suffix
            assert '.' in key
            dot = key.find('.')
            yield (lookup[key[:dot]] + key[dot:], param)
def cpu(partition):
    """Move the partition to host memory, keeping partition.device in sync."""
    cpu_device = torch.device('cpu')
    partition.device = cpu_device
    return nn.Module.cpu(partition)
def cuda(partition, device=None):
    """Move the partition to a CUDA device, keeping partition.device in sync.

    When *device* is None the process's current CUDA device is used.
    """
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    """nn.Module.to that also records the destination on partition.device.

    A target device is inferred from the 'device' kwarg, a 'tensor' kwarg,
    or the first positional argument (device / index / string / tensor);
    if none can be inferred, only the underlying nn.Module.to is applied.
    """
    if 'device' in kwargs:
        target = kwargs['device']
    elif 'tensor' in kwargs:
        target = kwargs['tensor'].device
    else:
        target = None
    if args:
        first = args[0]
        if isinstance(first, (torch.device, int, str)):
            target = first
        if torch.is_tensor(first):
            target = first.device
    if target is not None:
        partition.device = torch.device(target)
    return nn.Module.to(partition, *args, **kwargs)
def bert_large_uncased_whole_word_maskings_384_4p_bw12_async_pipedream():
    """Static experiment options for the 4-partition BERT-large SQuAD run.

    Returns a plain dict of model-loading flags consumed by the pipeline
    runner (no side effects).
    """
    options = {
        'model_type': 'bert_squad',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'precompute_attention_mask': True, 'return_dict': False},
        'do_resize_token_embedding': False,
    }
    return options
def create_pipeline_configuration(DEBUG=False, batch_size=12):
    """Static description of the 4-stage BERT-large pipeline (sequence length 384).

    Shapes below are recorded for the traced batch size (12); the loops at the
    end rewrite the batch dimension of every batched tensor to *batch_size*.
    With DEBUG=True every stage is placed on the CPU instead of cuda:0..3.
    Note: 'attention_mask' here is the pre-extended additive mask
    ([batch, 1, 1, 384], float32) — presumably precomputed by the caller
    (matches precompute_attention_mask in the experiment options).
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Softmax, Embedding, Linear, Tanh, LayerNorm, Dropout),
              # inputs fed to the whole pipeline; 'used_by' lists consumer stage ids
              'model_inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3]}, 'input_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}},
              # final pipeline outputs; 'created_by' is the producing stage id
              'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([12, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 3}},
              # per-stage activation contracts; created_by/used_by of -1 means the pipeline boundary
              'stages': {0: {'stage_cls': Partition0,
                             'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([12, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}},
                             'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}},
                             'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 3},
                         1: {'stage_cls': Partition1,
                             'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}},
                             'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}},
                             'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 2},
                         2: {'stage_cls': Partition2,
                             'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}},
                             'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}},
                             'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 1},
                         3: {'stage_cls': Partition3,
                             'inputs': {'attention_mask': {'shape': torch.Size([12, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([12, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}},
                             'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([12, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}},
                             'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 0}}}
    # rewrite the batch dimension of every batched pipeline input/output
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # ... and of every batched per-stage input/output
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
class Partition0(nn.Module):
    """Auto-generated pipeline stage 0 of the 4-way BERT-large partition.

    Owns the embedding block plus encoder layers 0-5 of
    BertForQuestionAnswering.  Layers are fetched from the traced model by
    their full scope names (LAYER_SCOPES) and re-registered here under
    generated names l_0..l_76; ``lookup`` maps a generated name back to the
    original state-dict name so checkpoints stay interchangeable.
    NOTE: forward() is machine-generated — the t_* locals are recycled
    registers, not meaningful names.
    """

    # Full traced-model scope names of the layers owned by this stage, in
    # execution order (5 embedding modules, then 12 modules per encoder layer).
    LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]']
    # Scopes of free-standing parameters/buffers owned by this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        # layers / tensors: scope-name -> module / tensor dicts built from the traced model
        super().__init__()
        # re-register every owned layer under a generated name l_<idx>
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # free-standing tensors become p_<n> (parameters) or b_<n> (buffers)
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # flat-arg structure consumed by forward: three single-tensor inputs
        self.input_structure = [1, 1, 1]
        # generated name -> original state-dict name (used by the module-level
        # state_dict/load_state_dict/named_* helpers)
        self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm', 'l_41': 'bert.encoder.3.attention.self.query', 'l_42': 'bert.encoder.3.attention.self.key', 'l_43': 'bert.encoder.3.attention.self.value', 'l_44': 'bert.encoder.3.attention.self.softmax', 'l_45': 'bert.encoder.3.attention.self.dropout', 'l_46': 'bert.encoder.3.attention.output.dense', 'l_47': 'bert.encoder.3.attention.output.dropout', 'l_48': 'bert.encoder.3.attention.output.LayerNorm', 'l_49': 'bert.encoder.3.intermediate.dense', 'l_50': 'bert.encoder.3.output.dense', 'l_51': 'bert.encoder.3.output.dropout', 'l_52': 'bert.encoder.3.output.LayerNorm', 'l_53': 'bert.encoder.4.attention.self.query', 'l_54': 'bert.encoder.4.attention.self.key', 'l_55': 'bert.encoder.4.attention.self.value', 'l_56': 'bert.encoder.4.attention.self.softmax', 'l_57': 'bert.encoder.4.attention.self.dropout', 'l_58': 'bert.encoder.4.attention.output.dense', 'l_59': 'bert.encoder.4.attention.output.dropout', 'l_60': 'bert.encoder.4.attention.output.LayerNorm', 'l_61': 'bert.encoder.4.intermediate.dense', 'l_62': 'bert.encoder.4.output.dense', 'l_63': 'bert.encoder.4.output.dropout', 'l_64': 'bert.encoder.4.output.LayerNorm', 'l_65': 'bert.encoder.5.attention.self.query', 'l_66': 'bert.encoder.5.attention.self.key', 'l_67': 'bert.encoder.5.attention.self.value', 'l_68': 'bert.encoder.5.attention.self.softmax', 'l_69': 'bert.encoder.5.attention.self.dropout', 'l_70': 'bert.encoder.5.attention.output.dense', 'l_71': 'bert.encoder.5.attention.output.dropout', 'l_72': 'bert.encoder.5.attention.output.LayerNorm', 'l_73': 'bert.encoder.5.intermediate.dense', 'l_74': 'bert.encoder.5.output.dense', 'l_75': 'bert.encoder.5.output.dropout', 'l_76': 'bert.encoder.5.output.LayerNorm'}
        self.to(self.device)

    def forward(self, *args):
        """Run embeddings + encoder layers 0-5; returns a 1-tuple with the layer-5 output.

        ``attention_mask`` is added directly to the raw attention scores, so it is
        presumably the pre-extended additive mask — confirm against the pipeline
        config.  The t_* locals are recycled registers from the tracer.
        """
        (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
        # ---- embeddings: word + position + token-type, then LayerNorm/Dropout ----
        t_0 = self.l_0(input_ids)
        t_1 = self.l_2(token_type_ids)
        t_2 = input_ids.size(1)
        # position ids built on the fly on this stage's device
        t_2 = torch.arange(t_2, dtype=torch.int64, device=self.device)
        t_2 = t_2.unsqueeze(0)
        t_2 = t_2.expand_as(input_ids)
        t_2 = self.l_1(t_2)
        t_2 = (t_0 + t_2)
        t_1 = (t_2 + t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        # ---- BertLayer[0] self-attention (16 heads x 64 dims) ----
        t_2 = self.l_5(t_1)
        t_0 = self.l_6(t_1)
        t_3 = self.l_7(t_1)
        t_4 = t_2.size()
        t_5 = t_0.size()
        t_6 = t_3.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_2.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_0.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        t_4 = self.l_10(t_4)
        t_4 = self.l_11(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_12(t_1)
        # BertLayer[0] feed-forward (GELU) + residual
        t_4 = self.l_13(t_1)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_14(t_4)
        t_4 = self.l_15(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_16(t_1)
        # ---- BertLayer[1] ----
        t_4 = self.l_17(t_1)
        t_9 = self.l_18(t_1)
        t_5 = self.l_19(t_1)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_0 = t_6[1]
        t_2 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_0, t_2, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_2 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_2, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_0 = t_7[1]
        t_2 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_0, t_2, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_2 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_2, t_6)
        t_6 = self.l_22(t_6)
        t_6 = self.l_23(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_24(t_1)
        t_6 = self.l_25(t_1)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_26(t_6)
        t_6 = self.l_27(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_28(t_1)
        # ---- BertLayer[2] ----
        t_6 = self.l_29(t_1)
        t_2 = self.l_30(t_1)
        t_8 = self.l_31(t_1)
        t_7 = t_6.size()
        t_0 = t_2.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_4 = t_0[0]
        t_9 = t_0[1]
        t_5 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_2.view(t_4, t_9, t_5, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_7, t_0)
        t_7 = math.sqrt(64)
        t_7 = (t_0 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_0 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_0, t_4, t_7)
        t_7 = self.l_34(t_7)
        t_7 = self.l_35(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_36(t_1)
        t_7 = self.l_37(t_1)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_38(t_7)
        t_7 = self.l_39(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_40(t_1)
        # ---- BertLayer[3] ----
        t_7 = self.l_41(t_1)
        t_4 = self.l_42(t_1)
        t_0 = self.l_43(t_1)
        t_3 = t_7.size()
        t_9 = t_4.size()
        t_5 = t_0.size()
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_8 = t_3[0]
        t_2 = t_3[1]
        t_6 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_8, t_2, t_6, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_6 = t_9[0]
        t_2 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_4.view(t_6, t_2, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_8 = t_5[0]
        t_2 = t_5[1]
        t_6 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_0.view(t_8, t_2, t_6, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_3, t_9)
        t_3 = math.sqrt(64)
        t_3 = (t_9 / t_3)
        t_3 = (t_3 + attention_mask)
        t_3 = self.l_44(t_3)
        t_3 = self.l_45(t_3)
        t_5 = torch.matmul(t_3, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_3 = t_5.size()
        t_3 = t_3[slice(None, (- 2), None)]
        t_3 = (t_3 + (1024,))
        t_9 = t_3[0]
        t_6 = t_3[1]
        t_3 = t_3[2]
        t_3 = t_5.view(t_9, t_6, t_3)
        t_3 = self.l_46(t_3)
        t_3 = self.l_47(t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_48(t_1)
        t_3 = self.l_49(t_1)
        t_3 = torch.nn.functional.gelu(t_3)
        t_3 = self.l_50(t_3)
        t_3 = self.l_51(t_3)
        t_1 = (t_3 + t_1)
        t_1 = self.l_52(t_1)
        # ---- BertLayer[4] ----
        t_3 = self.l_53(t_1)
        t_6 = self.l_54(t_1)
        t_9 = self.l_55(t_1)
        t_5 = t_3.size()
        t_2 = t_6.size()
        t_8 = t_9.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_0 = t_5[0]
        t_4 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_0, t_4, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_7 = t_2[0]
        t_4 = t_2[1]
        t_0 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_6.view(t_7, t_4, t_0, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_0 = t_8[0]
        t_4 = t_8[1]
        t_7 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_0, t_4, t_7, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_5, t_2)
        t_5 = math.sqrt(64)
        t_5 = (t_2 / t_5)
        t_5 = (t_5 + attention_mask)
        t_5 = self.l_56(t_5)
        t_5 = self.l_57(t_5)
        t_8 = torch.matmul(t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_5 = t_8.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_2 = t_5[0]
        t_7 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_8.view(t_2, t_7, t_5)
        t_5 = self.l_58(t_5)
        t_5 = self.l_59(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_60(t_1)
        t_5 = self.l_61(t_1)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_62(t_5)
        t_5 = self.l_63(t_5)
        t_1 = (t_5 + t_1)
        t_1 = self.l_64(t_1)
        # ---- BertLayer[5] ----
        t_5 = self.l_65(t_1)
        t_7 = self.l_66(t_1)
        t_2 = self.l_67(t_1)
        t_8 = t_5.size()
        t_4 = t_7.size()
        t_0 = t_2.size()
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_9 = t_8[0]
        t_6 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_5.view(t_9, t_6, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_3 = t_4[0]
        t_6 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_7.view(t_3, t_6, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_9 = t_0[0]
        t_6 = t_0[1]
        t_3 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_2.view(t_9, t_6, t_3, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_4 = t_4.transpose((- 1), (- 2))
        t_4 = torch.matmul(t_8, t_4)
        t_8 = math.sqrt(64)
        t_8 = (t_4 / t_8)
        t_8 = (t_8 + attention_mask)
        t_8 = self.l_68(t_8)
        t_8 = self.l_69(t_8)
        t_0 = torch.matmul(t_8, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_8 = t_0.size()
        t_8 = t_8[slice(None, (- 2), None)]
        t_8 = (t_8 + (1024,))
        t_4 = t_8[0]
        t_3 = t_8[1]
        t_8 = t_8[2]
        t_8 = t_0.view(t_4, t_3, t_8)
        t_8 = self.l_70(t_8)
        t_8 = self.l_71(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_72(t_1)
        t_8 = self.l_73(t_1)
        t_8 = torch.nn.functional.gelu(t_8)
        t_8 = self.l_74(t_8)
        t_8 = self.l_75(t_8)
        t_1 = (t_8 + t_1)
        t_1 = self.l_76(t_1)
        # single stage output: hidden states after encoder layer 5
        return (t_1,)

    # delegate to the module-level helpers so generated l_* names are
    # translated to/from the original model's state-dict names
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    # device moves also keep self.device in sync (see module-level cpu/cuda/to)
    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition1(nn.Module):
    """Second pipeline stage of a partitioned BertForQuestionAnswering model.

    Auto-generated stage code: holds BERT encoder layers 6-11 plus the
    layer-12 self-attention query projection (see ``LAYER_SCOPES`` /
    ``self.lookup``), and replays their forward computation as straight-line
    tensor code.  Inputs are the (already additive) attention mask and the
    hidden states produced by the previous stage; outputs are the layer-11
    hidden states and the layer-12 query projection, flattened for transport
    to the next stage.

    NOTE(review): generated code — variable names t_0..t_9 are reused
    register-style, so statement order is load-bearing.  Do not reorder.
    """

    # Scope names (as traced from the original model) of every sub-module
    # owned by this stage, in registration order: 12 sub-modules per encoder
    # layer (q/k/v projections, softmax, attention dropout, self-output
    # dense/dropout/LayerNorm, intermediate dense, output
    # dense/dropout/LayerNorm) for layers 6..11, then layer 12's query.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
    # This stage owns no free-standing parameters/buffers (only sub-modules).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        """Bind this stage's sub-modules and tensors, then move to `device`.

        Args:
            layers: mapping from traced scope name -> nn.Module; each scope in
                LAYER_SCOPES is registered here as attribute ``l_<idx>``.
            tensors: mapping from scope name -> Parameter/buffer for every
                entry in TENSORS (empty for this stage).
            device: target device for the whole stage (default 'cuda:1').
        """
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure descriptor consumed by unflatten() in forward():
        # two flat inputs (attention_mask, hidden states from stage 0).
        self.input_structure = [1, 1]
        # Maps local attribute names (l_<idx>) back to the original model's
        # state-dict prefixes, used by the state_dict/load_state_dict wrappers.
        self.lookup = {'l_0': 'bert.encoder.6.attention.self.query',
                       'l_1': 'bert.encoder.6.attention.self.key',
                       'l_2': 'bert.encoder.6.attention.self.value',
                       'l_3': 'bert.encoder.6.attention.self.softmax',
                       'l_4': 'bert.encoder.6.attention.self.dropout',
                       'l_5': 'bert.encoder.6.attention.output.dense',
                       'l_6': 'bert.encoder.6.attention.output.dropout',
                       'l_7': 'bert.encoder.6.attention.output.LayerNorm',
                       'l_8': 'bert.encoder.6.intermediate.dense',
                       'l_9': 'bert.encoder.6.output.dense',
                       'l_10': 'bert.encoder.6.output.dropout',
                       'l_11': 'bert.encoder.6.output.LayerNorm',
                       'l_12': 'bert.encoder.7.attention.self.query',
                       'l_13': 'bert.encoder.7.attention.self.key',
                       'l_14': 'bert.encoder.7.attention.self.value',
                       'l_15': 'bert.encoder.7.attention.self.softmax',
                       'l_16': 'bert.encoder.7.attention.self.dropout',
                       'l_17': 'bert.encoder.7.attention.output.dense',
                       'l_18': 'bert.encoder.7.attention.output.dropout',
                       'l_19': 'bert.encoder.7.attention.output.LayerNorm',
                       'l_20': 'bert.encoder.7.intermediate.dense',
                       'l_21': 'bert.encoder.7.output.dense',
                       'l_22': 'bert.encoder.7.output.dropout',
                       'l_23': 'bert.encoder.7.output.LayerNorm',
                       'l_24': 'bert.encoder.8.attention.self.query',
                       'l_25': 'bert.encoder.8.attention.self.key',
                       'l_26': 'bert.encoder.8.attention.self.value',
                       'l_27': 'bert.encoder.8.attention.self.softmax',
                       'l_28': 'bert.encoder.8.attention.self.dropout',
                       'l_29': 'bert.encoder.8.attention.output.dense',
                       'l_30': 'bert.encoder.8.attention.output.dropout',
                       'l_31': 'bert.encoder.8.attention.output.LayerNorm',
                       'l_32': 'bert.encoder.8.intermediate.dense',
                       'l_33': 'bert.encoder.8.output.dense',
                       'l_34': 'bert.encoder.8.output.dropout',
                       'l_35': 'bert.encoder.8.output.LayerNorm',
                       'l_36': 'bert.encoder.9.attention.self.query',
                       'l_37': 'bert.encoder.9.attention.self.key',
                       'l_38': 'bert.encoder.9.attention.self.value',
                       'l_39': 'bert.encoder.9.attention.self.softmax',
                       'l_40': 'bert.encoder.9.attention.self.dropout',
                       'l_41': 'bert.encoder.9.attention.output.dense',
                       'l_42': 'bert.encoder.9.attention.output.dropout',
                       'l_43': 'bert.encoder.9.attention.output.LayerNorm',
                       'l_44': 'bert.encoder.9.intermediate.dense',
                       'l_45': 'bert.encoder.9.output.dense',
                       'l_46': 'bert.encoder.9.output.dropout',
                       'l_47': 'bert.encoder.9.output.LayerNorm',
                       'l_48': 'bert.encoder.10.attention.self.query',
                       'l_49': 'bert.encoder.10.attention.self.key',
                       'l_50': 'bert.encoder.10.attention.self.value',
                       'l_51': 'bert.encoder.10.attention.self.softmax',
                       'l_52': 'bert.encoder.10.attention.self.dropout',
                       'l_53': 'bert.encoder.10.attention.output.dense',
                       'l_54': 'bert.encoder.10.attention.output.dropout',
                       'l_55': 'bert.encoder.10.attention.output.LayerNorm',
                       'l_56': 'bert.encoder.10.intermediate.dense',
                       'l_57': 'bert.encoder.10.output.dense',
                       'l_58': 'bert.encoder.10.output.dropout',
                       'l_59': 'bert.encoder.10.output.LayerNorm',
                       'l_60': 'bert.encoder.11.attention.self.query',
                       'l_61': 'bert.encoder.11.attention.self.key',
                       'l_62': 'bert.encoder.11.attention.self.value',
                       'l_63': 'bert.encoder.11.attention.self.softmax',
                       'l_64': 'bert.encoder.11.attention.self.dropout',
                       'l_65': 'bert.encoder.11.attention.output.dense',
                       'l_66': 'bert.encoder.11.attention.output.dropout',
                       'l_67': 'bert.encoder.11.attention.output.LayerNorm',
                       'l_68': 'bert.encoder.11.intermediate.dense',
                       'l_69': 'bert.encoder.11.output.dense',
                       'l_70': 'bert.encoder.11.output.dropout',
                       'l_71': 'bert.encoder.11.output.LayerNorm',
                       'l_72': 'bert.encoder.12.attention.self.query'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder layers 6-11 plus the layer-12 query projection.

        Generated straight-line code.  Each encoder layer repeats the same
        pattern: project q/k/v, reshape to (batch, 16 heads, seq, 64) via
        view+permute, compute scaled dot-product scores (/ sqrt(64)), add
        the additive attention mask, softmax + dropout, apply to values,
        re-merge heads to hidden size 1024, then the two residual+LayerNorm
        sub-blocks (self-output, and GELU feed-forward output).
        Temporaries t_0..t_9 are reused register-style; order matters.

        Returns:
            [layer-11 hidden states, layer-12 query projection], flattened.
        """
        (attention_mask, x0) = unflatten(args, self.input_structure)
        # --- BertLayer[6] self-attention (l_0..l_4: q/k/v, softmax, dropout) ---
        t_0 = self.l_0(x0)
        t_1 = self.l_1(x0)
        t_2 = self.l_2(x0)
        t_3 = t_0.size()
        t_4 = t_1.size()
        t_5 = t_2.size()
        # Split hidden dim into (16 heads, 64 per head) and move heads axis.
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_6 = t_3[0]
        t_7 = t_3[1]
        t_8 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_0.view(t_6, t_7, t_8, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_8 = t_4[0]
        t_7 = t_4[1]
        t_6 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_8, t_7, t_6, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_6 = t_5[0]
        t_7 = t_5[1]
        t_8 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_6, t_7, t_8, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        # Scaled dot-product attention with additive mask.
        t_4 = t_4.transpose((- 1), (- 2))
        t_4 = torch.matmul(t_3, t_4)
        t_3 = math.sqrt(64)
        t_3 = (t_4 / t_3)
        t_3 = (t_3 + attention_mask)
        t_3 = self.l_3(t_3)
        t_3 = self.l_4(t_3)
        t_5 = torch.matmul(t_3, t_5)
        # Merge heads back to hidden size 1024.
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_3 = t_5.size()
        t_3 = t_3[slice(None, (- 2), None)]
        t_3 = (t_3 + (1024,))
        t_4 = t_3[0]
        t_8 = t_3[1]
        t_3 = t_3[2]
        t_3 = t_5.view(t_4, t_8, t_3)
        # Self-output: dense + dropout, residual with x0, LayerNorm.
        t_3 = self.l_5(t_3)
        t_3 = self.l_6(t_3)
        t_3 = (t_3 + x0)
        t_3 = self.l_7(t_3)
        # Feed-forward: intermediate dense + GELU, output dense + dropout,
        # residual, LayerNorm.
        t_8 = self.l_8(t_3)
        t_8 = torch.nn.functional.gelu(t_8)
        t_8 = self.l_9(t_8)
        t_8 = self.l_10(t_8)
        t_3 = (t_8 + t_3)
        t_3 = self.l_11(t_3)
        # --- BertLayer[7] (l_12..l_23), same pattern ---
        t_8 = self.l_12(t_3)
        t_4 = self.l_13(t_3)
        t_5 = self.l_14(t_3)
        t_7 = t_8.size()
        t_6 = t_4.size()
        t_2 = t_5.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_1 = t_7[0]
        t_0 = t_7[1]
        t_9 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_8.view(t_1, t_0, t_9, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_9 = t_6[0]
        t_0 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_9, t_0, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_1 = t_2[0]
        t_0 = t_2[1]
        t_9 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_5.view(t_1, t_0, t_9, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_6 = t_6.transpose((- 1), (- 2))
        t_6 = torch.matmul(t_7, t_6)
        t_7 = math.sqrt(64)
        t_7 = (t_6 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_15(t_7)
        t_7 = self.l_16(t_7)
        t_2 = torch.matmul(t_7, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_2 = t_2.contiguous()
        t_7 = t_2.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_6 = t_7[0]
        t_9 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_2.view(t_6, t_9, t_7)
        t_7 = self.l_17(t_7)
        t_7 = self.l_18(t_7)
        t_3 = (t_7 + t_3)
        t_3 = self.l_19(t_3)
        t_7 = self.l_20(t_3)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_21(t_7)
        t_7 = self.l_22(t_7)
        t_3 = (t_7 + t_3)
        t_3 = self.l_23(t_3)
        # --- BertLayer[8] (l_24..l_35) ---
        t_7 = self.l_24(t_3)
        t_9 = self.l_25(t_3)
        t_6 = self.l_26(t_3)
        t_2 = t_7.size()
        t_0 = t_9.size()
        t_1 = t_6.size()
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_5 = t_2[0]
        t_4 = t_2[1]
        t_8 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_7.view(t_5, t_4, t_8, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_8 = t_0[0]
        t_4 = t_0[1]
        t_5 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_9.view(t_8, t_4, t_5, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_5 = t_1[0]
        t_4 = t_1[1]
        t_8 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_6.view(t_5, t_4, t_8, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_2, t_0)
        t_2 = math.sqrt(64)
        t_2 = (t_0 / t_2)
        t_2 = (t_2 + attention_mask)
        t_2 = self.l_27(t_2)
        t_2 = self.l_28(t_2)
        t_1 = torch.matmul(t_2, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_1 = t_1.contiguous()
        t_2 = t_1.size()
        t_2 = t_2[slice(None, (- 2), None)]
        t_2 = (t_2 + (1024,))
        t_0 = t_2[0]
        t_8 = t_2[1]
        t_2 = t_2[2]
        t_2 = t_1.view(t_0, t_8, t_2)
        t_2 = self.l_29(t_2)
        t_2 = self.l_30(t_2)
        t_3 = (t_2 + t_3)
        t_3 = self.l_31(t_3)
        t_2 = self.l_32(t_3)
        t_2 = torch.nn.functional.gelu(t_2)
        t_2 = self.l_33(t_2)
        t_2 = self.l_34(t_2)
        t_3 = (t_2 + t_3)
        t_3 = self.l_35(t_3)
        # --- BertLayer[9] (l_36..l_47) ---
        t_2 = self.l_36(t_3)
        t_8 = self.l_37(t_3)
        t_0 = self.l_38(t_3)
        t_1 = t_2.size()
        t_4 = t_8.size()
        t_5 = t_0.size()
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_6 = t_1[0]
        t_9 = t_1[1]
        t_7 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_2.view(t_6, t_9, t_7, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_9 = t_4[1]
        t_6 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_8.view(t_7, t_9, t_6, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_6 = t_5[0]
        t_9 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_0.view(t_6, t_9, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_4 = t_4.transpose((- 1), (- 2))
        t_4 = torch.matmul(t_1, t_4)
        t_1 = math.sqrt(64)
        t_1 = (t_4 / t_1)
        t_1 = (t_1 + attention_mask)
        t_1 = self.l_39(t_1)
        t_1 = self.l_40(t_1)
        t_5 = torch.matmul(t_1, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_1 = t_5.size()
        t_1 = t_1[slice(None, (- 2), None)]
        t_1 = (t_1 + (1024,))
        t_4 = t_1[0]
        t_7 = t_1[1]
        t_1 = t_1[2]
        t_1 = t_5.view(t_4, t_7, t_1)
        t_1 = self.l_41(t_1)
        t_1 = self.l_42(t_1)
        t_3 = (t_1 + t_3)
        t_3 = self.l_43(t_3)
        t_1 = self.l_44(t_3)
        t_1 = torch.nn.functional.gelu(t_1)
        t_1 = self.l_45(t_1)
        t_1 = self.l_46(t_1)
        t_3 = (t_1 + t_3)
        t_3 = self.l_47(t_3)
        # --- BertLayer[10] (l_48..l_59) ---
        t_1 = self.l_48(t_3)
        t_7 = self.l_49(t_3)
        t_4 = self.l_50(t_3)
        t_5 = t_1.size()
        t_9 = t_7.size()
        t_6 = t_4.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_0 = t_5[0]
        t_8 = t_5[1]
        t_2 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_1.view(t_0, t_8, t_2, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_2 = t_9[0]
        t_8 = t_9[1]
        t_0 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_7.view(t_2, t_8, t_0, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_0 = t_6[0]
        t_8 = t_6[1]
        t_2 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_0, t_8, t_2, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_5, t_9)
        t_5 = math.sqrt(64)
        t_5 = (t_9 / t_5)
        t_5 = (t_5 + attention_mask)
        t_5 = self.l_51(t_5)
        t_5 = self.l_52(t_5)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_5 = t_6.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_9 = t_5[0]
        t_2 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_6.view(t_9, t_2, t_5)
        t_5 = self.l_53(t_5)
        t_5 = self.l_54(t_5)
        t_3 = (t_5 + t_3)
        t_3 = self.l_55(t_3)
        t_5 = self.l_56(t_3)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_57(t_5)
        t_5 = self.l_58(t_5)
        t_3 = (t_5 + t_3)
        t_3 = self.l_59(t_3)
        # --- BertLayer[11] (l_60..l_71) ---
        t_5 = self.l_60(t_3)
        t_2 = self.l_61(t_3)
        t_9 = self.l_62(t_3)
        t_6 = t_5.size()
        t_8 = t_2.size()
        t_0 = t_9.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_4 = t_6[0]
        t_7 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_5.view(t_4, t_7, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_7 = t_8[1]
        t_4 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_2.view(t_1, t_7, t_4, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_4 = t_0[0]
        t_7 = t_0[1]
        t_1 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_9.view(t_4, t_7, t_1, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_63(t_6)
        t_6 = self.l_64(t_6)
        t_0 = torch.matmul(t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_6 = t_0.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_0.view(t_8, t_1, t_6)
        t_6 = self.l_65(t_6)
        t_6 = self.l_66(t_6)
        t_3 = (t_6 + t_3)
        t_3 = self.l_67(t_3)
        t_6 = self.l_68(t_3)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_69(t_6)
        t_6 = self.l_70(t_6)
        t_3 = (t_6 + t_3)
        t_3 = self.l_71(t_3)
        # --- BertLayer[12] query projection (l_72), computed here so the
        # next stage can start mid-attention ---
        t_6 = self.l_72(t_3)
        return list(flatten((t_3, t_6)))

    # The wrappers below delegate to the module-level helpers so that
    # checkpoints use the original model's names (via self.lookup) and
    # device moves keep self.device in sync.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'bert.encoder.12.attention.self.key', 'l_1': 'bert.encoder.12.attention.self.value', 'l_2': 'bert.encoder.12.attention.self.softmax', 'l_3': 'bert.encoder.12.attention.self.dropout', 'l_4': 'bert.encoder.12.attention.output.dense', 'l_5': 'bert.encoder.12.attention.output.dropout', 'l_6': 'bert.encoder.12.attention.output.LayerNorm', 'l_7': 'bert.encoder.12.intermediate.dense', 'l_8': 'bert.encoder.12.output.dense', 'l_9': 'bert.encoder.12.output.dropout', 'l_10': 'bert.encoder.12.output.LayerNorm', 'l_11': 'bert.encoder.13.attention.self.query', 'l_12': 'bert.encoder.13.attention.self.key', 'l_13': 'bert.encoder.13.attention.self.value', 'l_14': 'bert.encoder.13.attention.self.softmax', 'l_15': 'bert.encoder.13.attention.self.dropout', 'l_16': 'bert.encoder.13.attention.output.dense', 'l_17': 'bert.encoder.13.attention.output.dropout', 'l_18': 'bert.encoder.13.attention.output.LayerNorm', 'l_19': 'bert.encoder.13.intermediate.dense', 'l_20': 'bert.encoder.13.output.dense', 'l_21': 'bert.encoder.13.output.dropout', 'l_22': 'bert.encoder.13.output.LayerNorm', 'l_23': 'bert.encoder.14.attention.self.query', 'l_24': 'bert.encoder.14.attention.self.key', 'l_25': 'bert.encoder.14.attention.self.value', 'l_26': 'bert.encoder.14.attention.self.softmax', 'l_27': 'bert.encoder.14.attention.self.dropout', 'l_28': 
'bert.encoder.14.attention.output.dense', 'l_29': 'bert.encoder.14.attention.output.dropout', 'l_30': 'bert.encoder.14.attention.output.LayerNorm', 'l_31': 'bert.encoder.14.intermediate.dense', 'l_32': 'bert.encoder.14.output.dense', 'l_33': 'bert.encoder.14.output.dropout', 'l_34': 'bert.encoder.14.output.LayerNorm', 'l_35': 'bert.encoder.15.attention.self.query', 'l_36': 'bert.encoder.15.attention.self.key', 'l_37': 'bert.encoder.15.attention.self.value', 'l_38': 'bert.encoder.15.attention.self.softmax', 'l_39': 'bert.encoder.15.attention.self.dropout', 'l_40': 'bert.encoder.15.attention.output.dense', 'l_41': 'bert.encoder.15.attention.output.dropout', 'l_42': 'bert.encoder.15.attention.output.LayerNorm', 'l_43': 'bert.encoder.15.intermediate.dense', 'l_44': 'bert.encoder.15.output.dense', 'l_45': 'bert.encoder.15.output.dropout', 'l_46': 'bert.encoder.15.output.LayerNorm', 'l_47': 'bert.encoder.16.attention.self.query', 'l_48': 'bert.encoder.16.attention.self.key', 'l_49': 'bert.encoder.16.attention.self.value', 'l_50': 'bert.encoder.16.attention.self.softmax', 'l_51': 'bert.encoder.16.attention.self.dropout', 'l_52': 'bert.encoder.16.attention.output.dense', 'l_53': 'bert.encoder.16.attention.output.dropout', 'l_54': 'bert.encoder.16.attention.output.LayerNorm', 'l_55': 'bert.encoder.16.intermediate.dense', 'l_56': 'bert.encoder.16.output.dense', 'l_57': 'bert.encoder.16.output.dropout', 'l_58': 'bert.encoder.16.output.LayerNorm', 'l_59': 'bert.encoder.17.attention.self.query', 'l_60': 'bert.encoder.17.attention.self.key', 'l_61': 'bert.encoder.17.attention.self.value', 'l_62': 'bert.encoder.17.attention.self.softmax', 'l_63': 'bert.encoder.17.attention.self.dropout', 'l_64': 'bert.encoder.17.attention.output.dense', 'l_65': 'bert.encoder.17.attention.output.dropout', 'l_66': 'bert.encoder.17.attention.output.LayerNorm', 'l_67': 'bert.encoder.17.intermediate.dense', 'l_68': 'bert.encoder.17.output.dense', 'l_69': 'bert.encoder.17.output.dropout', 'l_70': 
'bert.encoder.17.output.LayerNorm', 'l_71': 'bert.encoder.18.attention.self.query'} self.to(self.device) def forward(self, *args): (attention_mask, x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(x0) t_2 = x1.size() t_3 = t_0.size() t_4 = t_1.size() t_2 = t_2[slice(None, (- 1), None)] t_2 = (t_2 + (16, 64)) t_5 = t_2[0] t_6 = t_2[1] t_7 = t_2[2] t_2 = t_2[3] t_2 = x1.view(t_5, t_6, t_7, t_2) t_2 = t_2.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_7 = t_3[0] t_6 = t_3[1] t_5 = t_3[2] t_3 = t_3[3] t_3 = t_0.view(t_7, t_6, t_5, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_5 = t_4[0] t_6 = t_4[1] t_7 = t_4[2] t_4 = t_4[3] t_4 = t_1.view(t_5, t_6, t_7, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_2, t_3) t_2 = math.sqrt(64) t_2 = (t_3 / t_2) t_2 = (t_2 + attention_mask) t_2 = self.l_2(t_2) t_2 = self.l_3(t_2) t_4 = torch.matmul(t_2, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_2 = t_4.size() t_2 = t_2[slice(None, (- 2), None)] t_2 = (t_2 + (1024,)) t_3 = t_2[0] t_7 = t_2[1] t_2 = t_2[2] t_2 = t_4.view(t_3, t_7, t_2) t_2 = self.l_4(t_2) t_2 = self.l_5(t_2) t_2 = (t_2 + x0) t_2 = self.l_6(t_2) t_7 = self.l_7(t_2) t_7 = torch.nn.functional.gelu(t_7) t_7 = self.l_8(t_7) t_7 = self.l_9(t_7) t_2 = (t_7 + t_2) t_2 = self.l_10(t_2) t_7 = self.l_11(t_2) t_3 = self.l_12(t_2) t_4 = self.l_13(t_2) t_6 = t_7.size() t_5 = t_3.size() t_1 = t_4.size() t_6 = t_6[slice(None, (- 1), None)] t_6 = (t_6 + (16, 64)) t_0 = t_6[0] t_8 = t_6[1] t_9 = t_6[2] t_6 = t_6[3] t_6 = t_7.view(t_0, t_8, t_9, t_6) t_6 = t_6.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_9 = t_5[0] t_8 = t_5[1] t_0 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_9, t_8, t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_1 = t_1[slice(None, (- 1), None)] t_1 = (t_1 + (16, 64)) t_0 = t_1[0] t_8 = t_1[1] t_9 = t_1[2] t_1 = t_1[3] t_1 
= t_4.view(t_0, t_8, t_9, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_5 = t_5.transpose((- 1), (- 2)) t_5 = torch.matmul(t_6, t_5) t_6 = math.sqrt(64) t_6 = (t_5 / t_6) t_6 = (t_6 + attention_mask) t_6 = self.l_14(t_6) t_6 = self.l_15(t_6) t_1 = torch.matmul(t_6, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_1 = t_1.contiguous() t_6 = t_1.size() t_6 = t_6[slice(None, (- 2), None)] t_6 = (t_6 + (1024,)) t_5 = t_6[0] t_9 = t_6[1] t_6 = t_6[2] t_6 = t_1.view(t_5, t_9, t_6) t_6 = self.l_16(t_6) t_6 = self.l_17(t_6) t_2 = (t_6 + t_2) t_2 = self.l_18(t_2) t_6 = self.l_19(t_2) t_6 = torch.nn.functional.gelu(t_6) t_6 = self.l_20(t_6) t_6 = self.l_21(t_6) t_2 = (t_6 + t_2) t_2 = self.l_22(t_2) t_6 = self.l_23(t_2) t_9 = self.l_24(t_2) t_5 = self.l_25(t_2) t_1 = t_6.size() t_8 = t_9.size() t_0 = t_5.size() t_1 = t_1[slice(None, (- 1), None)] t_1 = (t_1 + (16, 64)) t_4 = t_1[0] t_3 = t_1[1] t_7 = t_1[2] t_1 = t_1[3] t_1 = t_6.view(t_4, t_3, t_7, t_1) t_1 = t_1.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_7 = t_8[0] t_3 = t_8[1] t_4 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_7, t_3, t_4, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_4 = t_0[0] t_3 = t_0[1] t_7 = t_0[2] t_0 = t_0[3] t_0 = t_5.view(t_4, t_3, t_7, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_8 = t_8.transpose((- 1), (- 2)) t_8 = torch.matmul(t_1, t_8) t_1 = math.sqrt(64) t_1 = (t_8 / t_1) t_1 = (t_1 + attention_mask) t_1 = self.l_26(t_1) t_1 = self.l_27(t_1) t_0 = torch.matmul(t_1, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_0 = t_0.contiguous() t_1 = t_0.size() t_1 = t_1[slice(None, (- 2), None)] t_1 = (t_1 + (1024,)) t_8 = t_1[0] t_7 = t_1[1] t_1 = t_1[2] t_1 = t_0.view(t_8, t_7, t_1) t_1 = self.l_28(t_1) t_1 = self.l_29(t_1) t_2 = (t_1 + t_2) t_2 = self.l_30(t_2) t_1 = self.l_31(t_2) t_1 = torch.nn.functional.gelu(t_1) t_1 = self.l_32(t_1) t_1 = self.l_33(t_1) t_2 = (t_1 + t_2) t_2 = self.l_34(t_2) t_1 = self.l_35(t_2) t_7 = self.l_36(t_2) t_8 = self.l_37(t_2) 
t_0 = t_1.size() t_3 = t_7.size() t_4 = t_8.size() t_0 = t_0[slice(None, (- 1), None)] t_0 = (t_0 + (16, 64)) t_5 = t_0[0] t_9 = t_0[1] t_6 = t_0[2] t_0 = t_0[3] t_0 = t_1.view(t_5, t_9, t_6, t_0) t_0 = t_0.permute(0, 2, 1, 3) t_3 = t_3[slice(None, (- 1), None)] t_3 = (t_3 + (16, 64)) t_6 = t_3[0] t_9 = t_3[1] t_5 = t_3[2] t_3 = t_3[3] t_3 = t_7.view(t_6, t_9, t_5, t_3) t_3 = t_3.permute(0, 2, 1, 3) t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_5 = t_4[0] t_9 = t_4[1] t_6 = t_4[2] t_4 = t_4[3] t_4 = t_8.view(t_5, t_9, t_6, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_3 = t_3.transpose((- 1), (- 2)) t_3 = torch.matmul(t_0, t_3) t_0 = math.sqrt(64) t_0 = (t_3 / t_0) t_0 = (t_0 + attention_mask) t_0 = self.l_38(t_0) t_0 = self.l_39(t_0) t_4 = torch.matmul(t_0, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_4 = t_4.contiguous() t_0 = t_4.size() t_0 = t_0[slice(None, (- 2), None)] t_0 = (t_0 + (1024,)) t_3 = t_0[0] t_6 = t_0[1] t_0 = t_0[2] t_0 = t_4.view(t_3, t_6, t_0) t_0 = self.l_40(t_0) t_0 = self.l_41(t_0) t_2 = (t_0 + t_2) t_2 = self.l_42(t_2) t_0 = self.l_43(t_2) t_0 = torch.nn.functional.gelu(t_0) t_0 = self.l_44(t_0) t_0 = self.l_45(t_0) t_2 = (t_0 + t_2) t_2 = self.l_46(t_2) t_0 = self.l_47(t_2) t_6 = self.l_48(t_2) t_3 = self.l_49(t_2) t_4 = t_0.size() t_9 = t_6.size() t_5 = t_3.size() t_4 = t_4[slice(None, (- 1), None)] t_4 = (t_4 + (16, 64)) t_8 = t_4[0] t_7 = t_4[1] t_1 = t_4[2] t_4 = t_4[3] t_4 = t_0.view(t_8, t_7, t_1, t_4) t_4 = t_4.permute(0, 2, 1, 3) t_9 = t_9[slice(None, (- 1), None)] t_9 = (t_9 + (16, 64)) t_1 = t_9[0] t_7 = t_9[1] t_8 = t_9[2] t_9 = t_9[3] t_9 = t_6.view(t_1, t_7, t_8, t_9) t_9 = t_9.permute(0, 2, 1, 3) t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_8 = t_5[0] t_7 = t_5[1] t_1 = t_5[2] t_5 = t_5[3] t_5 = t_3.view(t_8, t_7, t_1, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_9 = t_9.transpose((- 1), (- 2)) t_9 = torch.matmul(t_4, t_9) t_4 = math.sqrt(64) t_4 = (t_9 / t_4) t_4 = (t_4 + attention_mask) t_4 = self.l_50(t_4) t_4 = 
self.l_51(t_4) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_5 = t_5.contiguous() t_4 = t_5.size() t_4 = t_4[slice(None, (- 2), None)] t_4 = (t_4 + (1024,)) t_9 = t_4[0] t_1 = t_4[1] t_4 = t_4[2] t_4 = t_5.view(t_9, t_1, t_4) t_4 = self.l_52(t_4) t_4 = self.l_53(t_4) t_2 = (t_4 + t_2) t_2 = self.l_54(t_2) t_4 = self.l_55(t_2) t_4 = torch.nn.functional.gelu(t_4) t_4 = self.l_56(t_4) t_4 = self.l_57(t_4) t_2 = (t_4 + t_2) t_2 = self.l_58(t_2) t_4 = self.l_59(t_2) t_1 = self.l_60(t_2) t_9 = self.l_61(t_2) t_5 = t_4.size() t_7 = t_1.size() t_8 = t_9.size() t_5 = t_5[slice(None, (- 1), None)] t_5 = (t_5 + (16, 64)) t_3 = t_5[0] t_6 = t_5[1] t_0 = t_5[2] t_5 = t_5[3] t_5 = t_4.view(t_3, t_6, t_0, t_5) t_5 = t_5.permute(0, 2, 1, 3) t_7 = t_7[slice(None, (- 1), None)] t_7 = (t_7 + (16, 64)) t_0 = t_7[0] t_6 = t_7[1] t_3 = t_7[2] t_7 = t_7[3] t_7 = t_1.view(t_0, t_6, t_3, t_7) t_7 = t_7.permute(0, 2, 1, 3) t_8 = t_8[slice(None, (- 1), None)] t_8 = (t_8 + (16, 64)) t_3 = t_8[0] t_6 = t_8[1] t_0 = t_8[2] t_8 = t_8[3] t_8 = t_9.view(t_3, t_6, t_0, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_7 = t_7.transpose((- 1), (- 2)) t_7 = torch.matmul(t_5, t_7) t_5 = math.sqrt(64) t_5 = (t_7 / t_5) t_5 = (t_5 + attention_mask) t_5 = self.l_62(t_5) t_5 = self.l_63(t_5) t_8 = torch.matmul(t_5, t_8) t_8 = t_8.permute(0, 2, 1, 3) t_8 = t_8.contiguous() t_5 = t_8.size() t_5 = t_5[slice(None, (- 2), None)] t_5 = (t_5 + (1024,)) t_7 = t_5[0] t_0 = t_5[1] t_5 = t_5[2] t_5 = t_8.view(t_7, t_0, t_5) t_5 = self.l_64(t_5) t_5 = self.l_65(t_5) t_2 = (t_5 + t_2) t_2 = self.l_66(t_2) t_5 = self.l_67(t_2) t_5 = torch.nn.functional.gelu(t_5) t_5 = self.l_68(t_5) t_5 = self.l_69(t_5) t_2 = (t_5 + t_2) t_2 = self.l_70(t_2) t_5 = self.l_71(t_2) return list(flatten((t_2, t_5))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return 
named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module):
    """Auto-generated pipeline stage 3 of a partitioned BertForQuestionAnswering.

    Owns BERT encoder layers 18-23 (from layer 18's key/value projections
    onward), the pooler, and the final ``qa_outputs`` head.  Layers are looked
    up by their full trace scope in the ``layers`` dict passed to ``__init__``
    and registered as ``l_0`` .. ``l_73``; ``self.lookup`` maps those generated
    names back to the original ``state_dict`` prefixes.

    ``forward`` is machine-generated, register-allocated code: the temporaries
    ``t_0`` .. ``t_9`` are aggressively reused, so statement order is
    significant and must not be changed.
    """

    # Full trace scopes of the layers owned by this stage, in registration
    # order (index i becomes submodule ``l_i``).
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]',
        'BertForQuestionAnswering/Linear[qa_outputs]']
    # No free-standing parameters/buffers are assigned to this stage.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        """Register this stage's layers/tensors and move the stage to *device*.

        layers:  dict mapping trace scope -> nn.Module (built elsewhere, e.g.
                 via layerDict on the full model).
        tensors: dict mapping trace scope -> Parameter/buffer tensor.
        device:  target device string; defaults to the stage's assigned GPU.
        """
        super().__init__()
        # Register owned layers as l_0 .. l_73 in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register owned tensors: parameters as p_i, buffers as b_i.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward() receives exactly 3 flat inputs (see unflatten call).
        self.input_structure = [1, 1, 1]
        # Maps generated submodule names back to original state_dict prefixes.
        self.lookup = {'l_0': 'bert.encoder.18.attention.self.key',
                       'l_1': 'bert.encoder.18.attention.self.value',
                       'l_2': 'bert.encoder.18.attention.self.softmax',
                       'l_3': 'bert.encoder.18.attention.self.dropout',
                       'l_4': 'bert.encoder.18.attention.output.dense',
                       'l_5': 'bert.encoder.18.attention.output.dropout',
                       'l_6': 'bert.encoder.18.attention.output.LayerNorm',
                       'l_7': 'bert.encoder.18.intermediate.dense',
                       'l_8': 'bert.encoder.18.output.dense',
                       'l_9': 'bert.encoder.18.output.dropout',
                       'l_10': 'bert.encoder.18.output.LayerNorm',
                       'l_11': 'bert.encoder.19.attention.self.query',
                       'l_12': 'bert.encoder.19.attention.self.key',
                       'l_13': 'bert.encoder.19.attention.self.value',
                       'l_14': 'bert.encoder.19.attention.self.softmax',
                       'l_15': 'bert.encoder.19.attention.self.dropout',
                       'l_16': 'bert.encoder.19.attention.output.dense',
                       'l_17': 'bert.encoder.19.attention.output.dropout',
                       'l_18': 'bert.encoder.19.attention.output.LayerNorm',
                       'l_19': 'bert.encoder.19.intermediate.dense',
                       'l_20': 'bert.encoder.19.output.dense',
                       'l_21': 'bert.encoder.19.output.dropout',
                       'l_22': 'bert.encoder.19.output.LayerNorm',
                       'l_23': 'bert.encoder.20.attention.self.query',
                       'l_24': 'bert.encoder.20.attention.self.key',
                       'l_25': 'bert.encoder.20.attention.self.value',
                       'l_26': 'bert.encoder.20.attention.self.softmax',
                       'l_27': 'bert.encoder.20.attention.self.dropout',
                       'l_28': 'bert.encoder.20.attention.output.dense',
                       'l_29': 'bert.encoder.20.attention.output.dropout',
                       'l_30': 'bert.encoder.20.attention.output.LayerNorm',
                       'l_31': 'bert.encoder.20.intermediate.dense',
                       'l_32': 'bert.encoder.20.output.dense',
                       'l_33': 'bert.encoder.20.output.dropout',
                       'l_34': 'bert.encoder.20.output.LayerNorm',
                       'l_35': 'bert.encoder.21.attention.self.query',
                       'l_36': 'bert.encoder.21.attention.self.key',
                       'l_37': 'bert.encoder.21.attention.self.value',
                       'l_38': 'bert.encoder.21.attention.self.softmax',
                       'l_39': 'bert.encoder.21.attention.self.dropout',
                       'l_40': 'bert.encoder.21.attention.output.dense',
                       'l_41': 'bert.encoder.21.attention.output.dropout',
                       'l_42': 'bert.encoder.21.attention.output.LayerNorm',
                       'l_43': 'bert.encoder.21.intermediate.dense',
                       'l_44': 'bert.encoder.21.output.dense',
                       'l_45': 'bert.encoder.21.output.dropout',
                       'l_46': 'bert.encoder.21.output.LayerNorm',
                       'l_47': 'bert.encoder.22.attention.self.query',
                       'l_48': 'bert.encoder.22.attention.self.key',
                       'l_49': 'bert.encoder.22.attention.self.value',
                       'l_50': 'bert.encoder.22.attention.self.softmax',
                       'l_51': 'bert.encoder.22.attention.self.dropout',
                       'l_52': 'bert.encoder.22.attention.output.dense',
                       'l_53': 'bert.encoder.22.attention.output.dropout',
                       'l_54': 'bert.encoder.22.attention.output.LayerNorm',
                       'l_55': 'bert.encoder.22.intermediate.dense',
                       'l_56': 'bert.encoder.22.output.dense',
                       'l_57': 'bert.encoder.22.output.dropout',
                       'l_58': 'bert.encoder.22.output.LayerNorm',
                       'l_59': 'bert.encoder.23.attention.self.query',
                       'l_60': 'bert.encoder.23.attention.self.key',
                       'l_61': 'bert.encoder.23.attention.self.value',
                       'l_62': 'bert.encoder.23.attention.self.softmax',
                       'l_63': 'bert.encoder.23.attention.self.dropout',
                       'l_64': 'bert.encoder.23.attention.output.dense',
                       'l_65': 'bert.encoder.23.attention.output.dropout',
                       'l_66': 'bert.encoder.23.attention.output.LayerNorm',
                       'l_67': 'bert.encoder.23.intermediate.dense',
                       'l_68': 'bert.encoder.23.output.dense',
                       'l_69': 'bert.encoder.23.output.dropout',
                       'l_70': 'bert.encoder.23.output.LayerNorm',
                       'l_71': 'bert.pooler.dense',
                       'l_72': 'bert.pooler.activation',
                       'l_73': 'qa_outputs'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder layers 18-23 and the qa_outputs head.

        Inputs (flattened): the additive attention mask, the hidden states
        from the previous stage (x0) and the query projection of layer 18
        already computed upstream (x1).  Returns a 1-tuple with the
        qa_outputs logits.  Generated code: temporaries t_0..t_9 are reused,
        so statement order matters.  Each attention block follows the same
        pattern: project q/k/v, reshape to (batch, heads=16, seq, head=64)
        via view+permute, scaled-dot-product with mask, re-merge heads to a
        1024-wide tensor, then residual + LayerNorm and the GELU FFN.
        """
        # unflatten/flatten are module-level helpers defined elsewhere in
        # this file.
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # --- encoder layer 18 (query x1 comes from the previous stage) ---
        t_0 = self.l_0(x0)
        t_1 = self.l_1(x0)
        t_2 = x1.size()
        t_3 = t_0.size()
        t_4 = t_1.size()
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_5 = t_2[0]
        t_6 = t_2[1]
        t_7 = t_2[2]
        t_2 = t_2[3]
        t_2 = x1.view(t_5, t_6, t_7, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_7 = t_3[0]
        t_6 = t_3[1]
        t_5 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_0.view(t_7, t_6, t_5, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_5 = t_4[0]
        t_6 = t_4[1]
        t_7 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_5, t_6, t_7, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_3 = t_3.transpose((- 1), (- 2))
        t_3 = torch.matmul(t_2, t_3)
        # scale by sqrt(head_dim)=sqrt(64), add additive mask, softmax+dropout
        t_2 = math.sqrt(64)
        t_2 = (t_3 / t_2)
        t_2 = (t_2 + attention_mask)
        t_2 = self.l_2(t_2)
        t_2 = self.l_3(t_2)
        t_4 = torch.matmul(t_2, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_2 = t_4.size()
        t_2 = t_2[slice(None, (- 2), None)]
        t_2 = (t_2 + (1024,))
        t_3 = t_2[0]
        t_7 = t_2[1]
        t_2 = t_2[2]
        t_2 = t_4.view(t_3, t_7, t_2)
        t_2 = self.l_4(t_2)
        t_2 = self.l_5(t_2)
        t_2 = (t_2 + x0)
        t_2 = self.l_6(t_2)
        t_7 = self.l_7(t_2)
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_8(t_7)
        t_7 = self.l_9(t_7)
        t_2 = (t_7 + t_2)
        t_2 = self.l_10(t_2)
        # --- encoder layer 19 ---
        t_7 = self.l_11(t_2)
        t_3 = self.l_12(t_2)
        t_4 = self.l_13(t_2)
        t_6 = t_7.size()
        t_5 = t_3.size()
        t_1 = t_4.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_0 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_7.view(t_0, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_0 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_9, t_8, t_0, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_0 = t_1[0]
        t_8 = t_1[1]
        t_9 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_4.view(t_0, t_8, t_9, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_6, t_5)
        t_6 = math.sqrt(64)
        t_6 = (t_5 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_14(t_6)
        t_6 = self.l_15(t_6)
        t_1 = torch.matmul(t_6, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_1 = t_1.contiguous()
        t_6 = t_1.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_5 = t_6[0]
        t_9 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_1.view(t_5, t_9, t_6)
        t_6 = self.l_16(t_6)
        t_6 = self.l_17(t_6)
        t_2 = (t_6 + t_2)
        t_2 = self.l_18(t_2)
        t_6 = self.l_19(t_2)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_2 = (t_6 + t_2)
        t_2 = self.l_22(t_2)
        # --- encoder layer 20 ---
        t_6 = self.l_23(t_2)
        t_9 = self.l_24(t_2)
        t_5 = self.l_25(t_2)
        t_1 = t_6.size()
        t_8 = t_9.size()
        t_0 = t_5.size()
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_4 = t_1[0]
        t_3 = t_1[1]
        t_7 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_6.view(t_4, t_3, t_7, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_7 = t_8[0]
        t_3 = t_8[1]
        t_4 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_7, t_3, t_4, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_4 = t_0[0]
        t_3 = t_0[1]
        t_7 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_5.view(t_4, t_3, t_7, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_1, t_8)
        t_1 = math.sqrt(64)
        t_1 = (t_8 / t_1)
        t_1 = (t_1 + attention_mask)
        t_1 = self.l_26(t_1)
        t_1 = self.l_27(t_1)
        t_0 = torch.matmul(t_1, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_1 = t_0.size()
        t_1 = t_1[slice(None, (- 2), None)]
        t_1 = (t_1 + (1024,))
        t_8 = t_1[0]
        t_7 = t_1[1]
        t_1 = t_1[2]
        t_1 = t_0.view(t_8, t_7, t_1)
        t_1 = self.l_28(t_1)
        t_1 = self.l_29(t_1)
        t_2 = (t_1 + t_2)
        t_2 = self.l_30(t_2)
        t_1 = self.l_31(t_2)
        t_1 = torch.nn.functional.gelu(t_1)
        t_1 = self.l_32(t_1)
        t_1 = self.l_33(t_1)
        t_2 = (t_1 + t_2)
        t_2 = self.l_34(t_2)
        # --- encoder layer 21 ---
        t_1 = self.l_35(t_2)
        t_7 = self.l_36(t_2)
        t_8 = self.l_37(t_2)
        t_0 = t_1.size()
        t_3 = t_7.size()
        t_4 = t_8.size()
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_5 = t_0[0]
        t_9 = t_0[1]
        t_6 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_1.view(t_5, t_9, t_6, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_6 = t_3[0]
        t_9 = t_3[1]
        t_5 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_7.view(t_6, t_9, t_5, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_6 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_8.view(t_5, t_9, t_6, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_3 = t_3.transpose((- 1), (- 2))
        t_3 = torch.matmul(t_0, t_3)
        t_0 = math.sqrt(64)
        t_0 = (t_3 / t_0)
        t_0 = (t_0 + attention_mask)
        t_0 = self.l_38(t_0)
        t_0 = self.l_39(t_0)
        t_4 = torch.matmul(t_0, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_4 = t_4.contiguous()
        t_0 = t_4.size()
        t_0 = t_0[slice(None, (- 2), None)]
        t_0 = (t_0 + (1024,))
        t_3 = t_0[0]
        t_6 = t_0[1]
        t_0 = t_0[2]
        t_0 = t_4.view(t_3, t_6, t_0)
        t_0 = self.l_40(t_0)
        t_0 = self.l_41(t_0)
        t_2 = (t_0 + t_2)
        t_2 = self.l_42(t_2)
        t_0 = self.l_43(t_2)
        t_0 = torch.nn.functional.gelu(t_0)
        t_0 = self.l_44(t_0)
        t_0 = self.l_45(t_0)
        t_2 = (t_0 + t_2)
        t_2 = self.l_46(t_2)
        # --- encoder layer 22 ---
        t_0 = self.l_47(t_2)
        t_6 = self.l_48(t_2)
        t_3 = self.l_49(t_2)
        t_4 = t_0.size()
        t_9 = t_6.size()
        t_5 = t_3.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_8 = t_4[0]
        t_7 = t_4[1]
        t_1 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_0.view(t_8, t_7, t_1, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_9 = t_9[slice(None, (- 1), None)]
        t_9 = (t_9 + (16, 64))
        t_1 = t_9[0]
        t_7 = t_9[1]
        t_8 = t_9[2]
        t_9 = t_9[3]
        t_9 = t_6.view(t_1, t_7, t_8, t_9)
        t_9 = t_9.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_8 = t_5[0]
        t_7 = t_5[1]
        t_1 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_3.view(t_8, t_7, t_1, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_9 = t_9.transpose((- 1), (- 2))
        t_9 = torch.matmul(t_4, t_9)
        t_4 = math.sqrt(64)
        t_4 = (t_9 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_50(t_4)
        t_4 = self.l_51(t_4)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_5 = t_5.contiguous()
        t_4 = t_5.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_9 = t_4[0]
        t_1 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_5.view(t_9, t_1, t_4)
        t_4 = self.l_52(t_4)
        t_4 = self.l_53(t_4)
        t_2 = (t_4 + t_2)
        t_2 = self.l_54(t_2)
        t_4 = self.l_55(t_2)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_56(t_4)
        t_4 = self.l_57(t_4)
        t_2 = (t_4 + t_2)
        t_2 = self.l_58(t_2)
        # --- encoder layer 23 ---
        t_4 = self.l_59(t_2)
        t_1 = self.l_60(t_2)
        t_9 = self.l_61(t_2)
        t_5 = t_4.size()
        t_7 = t_1.size()
        t_8 = t_9.size()
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_3 = t_5[0]
        t_6 = t_5[1]
        t_0 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_4.view(t_3, t_6, t_0, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_0 = t_7[0]
        t_6 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_1.view(t_0, t_6, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_3 = t_8[0]
        t_6 = t_8[1]
        t_0 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_3, t_6, t_0, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7.transpose((- 1), (- 2))
        t_7 = torch.matmul(t_5, t_7)
        t_5 = math.sqrt(64)
        t_5 = (t_7 / t_5)
        t_5 = (t_5 + attention_mask)
        t_5 = self.l_62(t_5)
        t_5 = self.l_63(t_5)
        t_8 = torch.matmul(t_5, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_8 = t_8.contiguous()
        t_5 = t_8.size()
        t_5 = t_5[slice(None, (- 2), None)]
        t_5 = (t_5 + (1024,))
        t_7 = t_5[0]
        t_0 = t_5[1]
        t_5 = t_5[2]
        t_5 = t_8.view(t_7, t_0, t_5)
        t_5 = self.l_64(t_5)
        t_5 = self.l_65(t_5)
        t_2 = (t_5 + t_2)
        t_2 = self.l_66(t_2)
        t_5 = self.l_67(t_2)
        t_5 = torch.nn.functional.gelu(t_5)
        t_5 = self.l_68(t_5)
        t_5 = self.l_69(t_5)
        t_2 = (t_5 + t_2)
        t_2 = self.l_70(t_2)
        # qa_outputs logits over the full sequence (the stage's only output)
        t_5 = self.l_73(t_2)
        # NOTE(review): the pooler (l_71 dense + l_72 tanh) is applied to the
        # [CLS] token here but its result is discarded — only t_5 is returned.
        # Generated dead code; harmless but wasted compute.
        t_2 = t_2[(slice(None, None, None), 0)]
        t_2 = self.l_71(t_2)
        t_2 = self.l_72(t_2)
        return (t_5,)

    # The following delegate to module-level helper functions (defined
    # elsewhere in this file) that remap names via self.lookup / handle the
    # stage's device bookkeeping.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Walk the module tree depth-first, yielding ``(layer, scope, parent)``.

    A child is treated as a leaf (yielded, not descended into) when it has no
    children of its own, is an instance of one of ``basic_blocks``, or the
    remaining ``depth`` budget is exhausted.  With ``full=True`` every visited
    module is yielded as ``(layer, scope, parent, is_leaf)``, including the
    intermediate containers that are descended into.

    ``scope`` is a path like ``Parent/ChildType[name]/...`` rooted at the
    class name of the original ``module``.
    """
    if prefix is None:
        prefix = type(module).__name__
    for child_name, child in module.named_children():
        scope = f'{prefix}/{type(child).__name__}[{child_name}]'
        is_leaf = (len(list(child.children())) == 0
                   or isinstance(child, tuple(basic_blocks))
                   or depth == 0)
        if is_leaf:
            yield (child, scope, module, True) if full else (child, scope, module)
            continue
        if full:
            yield (child, scope, module, False)
        yield from traverse_model(child, depth - 1, scope, basic_blocks, full)
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Return a mapping of scope-name -> layer for every leaf layer found by
    ``traverse_model`` under the given depth/basic_blocks settings."""
    mapping = {}
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Yield ``(tensor, scope)`` for every parameter and buffer in the module
    tree.  Parameters of each module are yielded before its buffers, and a
    module's own tensors before those of its children."""
    if prefix is None:
        prefix = type(module).__name__
    for param_name, param in module.named_parameters(recurse=False):
        yield param, f'{prefix}/{type(param).__name__}[{param_name}]'
    for buffer_name, buffer in module.named_buffers(recurse=False):
        yield buffer, f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
    for child_name, child in module.named_children():
        child_prefix = f'{prefix}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_prefix)
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Return an ordered mapping of scope-name -> tensor covering every
    parameter and buffer of *model*, in ``traverse_params_buffs`` order."""
    pairs = ((scope, tensor) for (tensor, scope) in traverse_params_buffs(model))
    return collections.OrderedDict(pairs)
def move_tensors(ts, device):
    """Return *ts* with every tensor/module leaf moved to *device*; the nested
    list/tuple/set/dict structure is preserved and non-tensor leaves are
    returned unchanged."""
    def _move(item):
        return item.to(device) if isinstance(item, (nn.Module, Tensor)) else item
    return nested_map(_move, ts)
def nested_map(func, ts, full=False): if isinstance(ts, torch.Size): return func(ts) elif isinstance(ts, (list, tuple, set)): return type(ts)((nested_map(func, t, full=full) for t in ts)) elif isinstance(ts, dict): return {k: nested_map(func, v, full=full) for (k, v) in ts.items()} elif (isinstance(ts, slice) and full): start = nested_map(func, ts.start, full=full) stop = nested_map(func, ts.stop, full=full) step = nested_map(func, ts.step, full=full) return slice(start, stop, step) return func(ts)
def flatten(ts):
    """Yield the leaves of a nested structure depth-first.

    Dicts are traversed in sorted-key order (so flatten/unflatten agree on
    ordering); ``torch.Size`` is yielded whole as a single leaf.
    """
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, dict):
        for _key, value in sorted(ts.items(), key=lambda kv: kv[0]):
            yield from flatten(value)
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    else:
        yield ts
def unflatten(xs, structure):
    """Rebuild *structure* from the flat sequence *xs* (inverse of flatten)."""
    value, _consumed = _unflatten(xs, structure)
    return value
def _unflatten(xs, structure): if isinstance(structure, torch.Size): return (xs[0], 1) if (not isinstance(structure, (list, tuple, set, dict))): return (xs[0], 1) if isinstance(structure, (list, tuple, set)): offset = 0 elements = [] for s in structure: (e, n) = _unflatten(xs[offset:], s) elements.append(e) offset += n return (type(structure)(elements), offset) assert isinstance(structure, dict) offset = 0 elements = dict() for (k, v) in sorted(structure.items(), key=(lambda t: t[0])): (e, n) = _unflatten(xs[offset:], v) elements[k] = e offset += n return (elements, offset)
def state_dict(partition, *args, **kwargs):
    """Like ``nn.Module.state_dict`` but with the generated submodule names
    (``l_0`` etc.) translated back to the original model paths via
    ``partition.lookup``."""
    raw = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    translated = dict()
    for key, value in raw.items():
        if key in lookup:
            translated[lookup[key]] = value
        else:
            # Keys like 'l_0.weight': translate only the prefix before the first dot.
            assert '.' in key
            dot = key.find('.')
            translated[lookup[key[:dot]] + key[dot:]] = value
    return translated
def load_state_dict(partition, state_dict, strict=True):
    """Load a state dict whose keys use the original model paths back into the
    partition, reversing the ``partition.lookup`` name translation and moving
    each tensor to ``partition.device``.  Keys whose prefix is not found in
    the reverse lookup are silently skipped (matching the generated code)."""
    reverse_lookup = {original: generated for generated, original in partition.lookup.items()}
    device = partition.device
    new_state = dict()
    for key in list(partition.state_dict(None).keys()):
        if key in reverse_lookup:
            new_state[reverse_lookup[key]] = state_dict[key].to(device)
            continue
        dot = key.rfind('.')
        prefix = key[:dot]
        if prefix in reverse_lookup:
            new_state[reverse_lookup[prefix] + key[dot:]] = state_dict[key].to(device)
    nn.Module.load_state_dict(partition, new_state, strict=strict)
def named_buffers(partition, prefix='', recurse=True):
    """``nn.Module.named_buffers`` with generated names translated through
    ``partition.lookup`` (prefix before the first dot for nested names)."""
    lookup = partition.lookup
    for key, buf in nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield lookup[key], buf
        else:
            assert '.' in key
            dot = key.find('.')
            yield lookup[key[:dot]] + key[dot:], buf
def named_parameters(partition, prefix='', recurse=True):
    """``nn.Module.named_parameters`` with generated names translated through
    ``partition.lookup`` (prefix before the first dot for nested names)."""
    lookup = partition.lookup
    for key, param in nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse):
        if key in lookup:
            yield lookup[key], param
        else:
            assert '.' in key
            dot = key.find('.')
            yield lookup[key[:dot]] + key[dot:], param
def cpu(partition):
    """Move the partition to CPU, recording the device on ``partition.device``."""
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)
def cuda(partition, device=None):
    """Move the partition to a CUDA device (the current CUDA device when none
    is given), recording it on ``partition.device``."""
    target = torch.cuda.current_device() if device is None else device
    partition.device = torch.device(target)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    """``nn.Module.to`` that also records the target device (if any was given,
    positionally or via ``device=``/``tensor=`` kwargs) on ``partition.device``."""
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        head = args[0]
        if isinstance(head, (torch.device, int, str)):
            device = head
        if torch.is_tensor(head):
            device = head.device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
def bert_large_uncased_whole_word_maskings_384_4p_bw12_pipedream():
    """Return the preset run-configuration record for this partitioned
    bert-large SQuAD model."""
    return {
        'model_type': 'bert_squad',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'precompute_attention_mask': True, 'return_dict': False},
        'do_resize_token_embedding': False,
    }
def create_pipeline_configuration(DEBUG=False, batch_size=24):
    # Static description of the 8-stage pipeline partitioning of
    # BertForQuestionAnswering (bert-large): for each stage, its Partition
    # class, input/output tensor descriptors (shape/dtype/req_grad/producer
    # and consumer stage ids, -1 meaning the model boundary) and device
    # placement.  Shapes were traced with batch size 24 and are rescaled to
    # `batch_size` below.  When DEBUG is set every stage is placed on CPU.
    # NOTE(review): 'stage_depth' decreases 7 -> 0 toward the final stage —
    # presumably the distance from the pipeline end; confirm against the
    # framework that consumes this config.
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (LayerNorm, Linear, Tanh, Softmax, Embedding, Dropout), 'model_inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3, 4, 5, 6, 7]}, 'input_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([24, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/torch.nn.functional::gelu_310': {'shape': torch.Size([24, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)},
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/torch.nn.functional::gelu_310': {'shape': torch.Size([24, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]': {'shape': torch.Size([24, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]': {'shape': torch.Size([24, 384, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32,
    'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Tensor::view_1195': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype':
    torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Tensor::view_1195': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1463': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]': {'shape': torch.Size([24, 16, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1463': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4},
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]': {'shape': torch.Size([24, 16, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1760': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Tensor::__truediv___1767': {'shape': torch.Size([24, 16, 384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_1760': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Tensor::__truediv___1767': {'shape': torch.Size([24, 16,
    384, 384]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_2019': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_2038': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::view_2052': {'shape': torch.Size([24, 384, 16, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_2019': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6},
    'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::permute_2038': {'shape': torch.Size([24, 16, 384, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Tensor::view_2052': {'shape': torch.Size([24, 384, 16, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([24, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}}
    batch_dim = config['batch_dim']
    # Rewrite the batch dimension of every batched shape descriptor to the
    # requested batch_size (model-level I/O first, then per-stage I/O).
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
class Partition0(nn.Module):
    # First pipeline stage of the partitioned BertForQuestionAnswering:
    # embeddings plus encoder layers 0-2 (up to layer 2's intermediate GELU).
    # Generated code — submodules are registered under generated names
    # l_0..l_37 in LAYER_SCOPES order, and `lookup` maps those names back to
    # the original parameter paths for state_dict translation.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]']
    # No free-standing parameters/buffers belong to this stage.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:0'):
        super().__init__()
        # Register the stage's layers under generated names l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward() receives a flat argument list; this structure drives unflatten().
        self.input_structure = [1, 1, 1]
        # Generated name -> original parameter path (used by the state_dict helpers).
        self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax',
                       'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense'}
        self.to(self.device)

    def forward(self, *args):
        # Generated straight-line code; t_0..t_9 are reused like registers, so
        # statement order is significant throughout.
        (attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
        # ---- embeddings: word + position + token-type, then LayerNorm/dropout ----
        t_0 = self.l_0(input_ids)
        t_1 = self.l_2(token_type_ids)
        t_2 = input_ids.size(1)
        t_2 = torch.arange(t_2, dtype=torch.int64, device=self.device)
        t_2 = t_2.unsqueeze(0)
        t_2 = t_2.expand_as(input_ids)
        t_2 = self.l_1(t_2)
        t_2 = (t_0 + t_2)
        t_1 = (t_2 + t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        # ---- BertLayer[0] self-attention (heads split as (16, 64)) ----
        t_2 = self.l_5(t_1)
        t_0 = self.l_6(t_1)
        t_3 = self.l_7(t_1)
        t_4 = t_2.size()
        t_5 = t_0.size()
        t_6 = t_3.size()
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_2.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_0.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        # scores / sqrt(head_dim) + mask, then softmax + dropout
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # attention output projection + residual + LayerNorm
        t_4 = self.l_10(t_4)
        t_4 = self.l_11(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_12(t_1)
        # BertLayer[0] feed-forward (GELU) + residual + LayerNorm
        t_4 = self.l_13(t_1)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_14(t_4)
        t_4 = self.l_15(t_4)
        t_1 = (t_4 + t_1)
        t_1 = self.l_16(t_1)
        # ---- BertLayer[1] self-attention ----
        t_4 = self.l_17(t_1)
        t_9 = self.l_18(t_1)
        t_5 = self.l_19(t_1)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_0 = t_6[1]
        t_2 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_0, t_2, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_2 = t_8[0]
        t_0 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_2, t_0, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_0 = t_7[1]
        t_2 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_0, t_2, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_2 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_2, t_6)
        t_6 = self.l_22(t_6)
        t_6 = self.l_23(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_24(t_1)
        # BertLayer[1] feed-forward + residual + LayerNorm
        t_6 = self.l_25(t_1)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_26(t_6)
        t_6 = self.l_27(t_6)
        t_1 = (t_6 + t_1)
        t_1 = self.l_28(t_1)
        # ---- BertLayer[2] self-attention ----
        t_6 = self.l_29(t_1)
        t_2 = self.l_30(t_1)
        t_8 = self.l_31(t_1)
        t_7 = t_6.size()
        t_0 = t_2.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_4 = t_0[0]
        t_9 = t_0[1]
        t_5 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_2.view(t_4, t_9, t_5, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_0 = t_0.transpose((- 1), (- 2))
        t_0 = torch.matmul(t_7, t_0)
        t_7 = math.sqrt(64)
        t_7 = (t_0 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_0 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_0, t_4, t_7)
        t_7 = self.l_34(t_7)
        t_7 = self.l_35(t_7)
        t_1 = (t_7 + t_1)
        t_1 = self.l_36(t_1)
        # BertLayer[2] intermediate dense + GELU; both stage outputs are
        # shipped to the next stage as a flat list.
        t_7 = self.l_37(t_1)
        t_7 = torch.nn.functional.gelu(t_7)
        return list(flatten((t_1, t_7)))

    # Delegate to the module-level helpers that translate generated names.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition1(nn.Module):
    """Pipeline stage 1 of the partitioned BertForQuestionAnswering model.

    Auto-generated stage holding the tail of encoder layer 2 (its output
    block) plus encoder layers 3 and 4 in full and layer 5 up to its
    intermediate dense projection.  Inputs are the extended attention mask
    and the two activations produced by stage 0; outputs feed stage 2.
    NOTE(review): generated code — statement order and the reuse of the
    t_0..t_9 temporaries are significant; do not reorder by hand.
    """

    # Fully-qualified scope names of the traced sub-modules this stage owns,
    # in execution order; index i becomes attribute `l_{i}` in __init__.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]']
    # Free-standing parameters/buffers assigned to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        """Bind the stage's traced layers/tensors and move them to `device`.

        layers  -- mapping from scope name to the traced nn.Module.
        tensors -- mapping from scope name to a free parameter or buffer.
        device  -- target device for this stage (default 'cuda:1').
        """
        super().__init__()
        # Register each owned layer as attribute l_0, l_1, ... in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Free tensors become p_{i} (parameters) or b_{i} (buffers).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure used by unflatten() to regroup the flat *args in forward:
        # three single-tensor inputs (attention_mask, x0, x1).
        self.input_structure = [1, 1, 1]
        # Maps generated attribute names back to original model state-dict keys.
        self.lookup = {
            'l_0': 'bert.encoder.2.output.dense',
            'l_1': 'bert.encoder.2.output.dropout',
            'l_2': 'bert.encoder.2.output.LayerNorm',
            'l_3': 'bert.encoder.3.attention.self.query',
            'l_4': 'bert.encoder.3.attention.self.key',
            'l_5': 'bert.encoder.3.attention.self.value',
            'l_6': 'bert.encoder.3.attention.self.softmax',
            'l_7': 'bert.encoder.3.attention.self.dropout',
            'l_8': 'bert.encoder.3.attention.output.dense',
            'l_9': 'bert.encoder.3.attention.output.dropout',
            'l_10': 'bert.encoder.3.attention.output.LayerNorm',
            'l_11': 'bert.encoder.3.intermediate.dense',
            'l_12': 'bert.encoder.3.output.dense',
            'l_13': 'bert.encoder.3.output.dropout',
            'l_14': 'bert.encoder.3.output.LayerNorm',
            'l_15': 'bert.encoder.4.attention.self.query',
            'l_16': 'bert.encoder.4.attention.self.key',
            'l_17': 'bert.encoder.4.attention.self.value',
            'l_18': 'bert.encoder.4.attention.self.softmax',
            'l_19': 'bert.encoder.4.attention.self.dropout',
            'l_20': 'bert.encoder.4.attention.output.dense',
            'l_21': 'bert.encoder.4.attention.output.dropout',
            'l_22': 'bert.encoder.4.attention.output.LayerNorm',
            'l_23': 'bert.encoder.4.intermediate.dense',
            'l_24': 'bert.encoder.4.output.dense',
            'l_25': 'bert.encoder.4.output.dropout',
            'l_26': 'bert.encoder.4.output.LayerNorm',
            'l_27': 'bert.encoder.5.attention.self.query',
            'l_28': 'bert.encoder.5.attention.self.key',
            'l_29': 'bert.encoder.5.attention.self.value',
            'l_30': 'bert.encoder.5.attention.self.softmax',
            'l_31': 'bert.encoder.5.attention.self.dropout',
            'l_32': 'bert.encoder.5.attention.output.dense',
            'l_33': 'bert.encoder.5.attention.output.dropout',
            'l_34': 'bert.encoder.5.attention.output.LayerNorm',
            'l_35': 'bert.encoder.5.intermediate.dense'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder layers 2(tail)-5(partial) on the stage-0 activations.

        args: flat list of (attention_mask, x0, x1) — the additive attention
        mask and two activations from stage 0 (x1 is layer 2's intermediate
        activation, x0 the residual it is added back onto — presumably; shapes
        are not visible here, TODO confirm against stage-0 outputs).
        Returns [hidden_after_layer_4, pre_gelu_intermediate_of_layer_5].
        """
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # --- BertLayer[2] output block: dense -> dropout -> residual -> LN ---
        t_0 = self.l_0(x1)
        t_0 = self.l_1(t_0)
        t_0 = (t_0 + x0)
        t_0 = self.l_2(t_0)
        # --- BertLayer[3] self-attention: Q/K/V projections ---
        t_1 = self.l_3(t_0)
        t_2 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_4 = t_1.size()
        t_5 = t_2.size()
        t_6 = t_3.size()
        # Split hidden dim into 16 heads x 64 dims and move heads to axis 1.
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        # Scaled dot-product scores: Q @ K^T / sqrt(64) + mask -> softmax -> dropout.
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_6(t_4)
        t_4 = self.l_7(t_4)
        # Weighted values, merge heads back to hidden size 1024.
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # BertLayer[3] self-output + residual + LN, then FFN + residual + LN.
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_10(t_0)
        t_4 = self.l_11(t_0)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_12(t_4)
        t_4 = self.l_13(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_14(t_0)
        # --- BertLayer[4]: same attention + FFN pattern as above ---
        t_4 = self.l_15(t_0)
        t_9 = self.l_16(t_0)
        t_5 = self.l_17(t_0)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_2 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_2, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_2 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_1, t_2, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_2 = t_7[1]
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_2, t_1, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_18(t_6)
        t_6 = self.l_19(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_1, t_6)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_22(t_0)
        t_6 = self.l_23(t_0)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_24(t_6)
        t_6 = self.l_25(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_26(t_0)
        # --- BertLayer[5]: attention + self-output + LN + intermediate dense ---
        t_6 = self.l_27(t_0)
        t_1 = self.l_28(t_0)
        t_8 = self.l_29(t_0)
        t_7 = t_6.size()
        t_2 = t_1.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_4 = t_2[0]
        t_9 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_4, t_9, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_7, t_2)
        t_7 = math.sqrt(64)
        t_7 = (t_2 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_30(t_7)
        t_7 = self.l_31(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_0_ign = None  # (no-op placeholder removed)  # NOTE(review): none — kept structure identical below
        t_2 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_2, t_4, t_7)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        t_0 = (t_7 + t_0)
        t_0 = self.l_34(t_0)
        # Layer 5 intermediate dense; gelu is applied by the NEXT stage
        # (Partition2 starts with torch.nn.functional.gelu(x1)).
        t_7 = self.l_35(t_0)
        # Flatten the pair for transport to the next pipeline stage.
        return list(flatten((t_0, t_7)))

    # The following thin wrappers delegate to module-level helpers that
    # translate between generated names (l_i/p_i/b_i) and original keys.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition2(nn.Module):
    """Pipeline stage 2 of the partitioned BertForQuestionAnswering model.

    Auto-generated stage holding the tail of encoder layer 5 (gelu + output
    block), encoder layers 6 and 7 in full, and layer 8 up to the
    self-attention output dropout.  Inputs come from stage 1; outputs feed
    stage 3.  NOTE(review): generated code — statement order and the reuse
    of the t_0..t_9 temporaries are significant; do not reorder by hand.
    """

    # Fully-qualified scope names of the traced sub-modules this stage owns,
    # in execution order; index i becomes attribute `l_{i}` in __init__.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]']
    # Free-standing parameters/buffers assigned to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        """Bind the stage's traced layers/tensors and move them to `device`.

        layers  -- mapping from scope name to the traced nn.Module.
        tensors -- mapping from scope name to a free parameter or buffer.
        device  -- target device for this stage (default 'cuda:2').
        """
        super().__init__()
        # Register each owned layer as attribute l_0, l_1, ... in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Free tensors become p_{i} (parameters) or b_{i} (buffers).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure used by unflatten() to regroup the flat *args in forward:
        # three single-tensor inputs (attention_mask, x0, x1).
        self.input_structure = [1, 1, 1]
        # Maps generated attribute names back to original model state-dict keys.
        self.lookup = {
            'l_0': 'bert.encoder.5.output.dense',
            'l_1': 'bert.encoder.5.output.dropout',
            'l_2': 'bert.encoder.5.output.LayerNorm',
            'l_3': 'bert.encoder.6.attention.self.query',
            'l_4': 'bert.encoder.6.attention.self.key',
            'l_5': 'bert.encoder.6.attention.self.value',
            'l_6': 'bert.encoder.6.attention.self.softmax',
            'l_7': 'bert.encoder.6.attention.self.dropout',
            'l_8': 'bert.encoder.6.attention.output.dense',
            'l_9': 'bert.encoder.6.attention.output.dropout',
            'l_10': 'bert.encoder.6.attention.output.LayerNorm',
            'l_11': 'bert.encoder.6.intermediate.dense',
            'l_12': 'bert.encoder.6.output.dense',
            'l_13': 'bert.encoder.6.output.dropout',
            'l_14': 'bert.encoder.6.output.LayerNorm',
            'l_15': 'bert.encoder.7.attention.self.query',
            'l_16': 'bert.encoder.7.attention.self.key',
            'l_17': 'bert.encoder.7.attention.self.value',
            'l_18': 'bert.encoder.7.attention.self.softmax',
            'l_19': 'bert.encoder.7.attention.self.dropout',
            'l_20': 'bert.encoder.7.attention.output.dense',
            'l_21': 'bert.encoder.7.attention.output.dropout',
            'l_22': 'bert.encoder.7.attention.output.LayerNorm',
            'l_23': 'bert.encoder.7.intermediate.dense',
            'l_24': 'bert.encoder.7.output.dense',
            'l_25': 'bert.encoder.7.output.dropout',
            'l_26': 'bert.encoder.7.output.LayerNorm',
            'l_27': 'bert.encoder.8.attention.self.query',
            'l_28': 'bert.encoder.8.attention.self.key',
            'l_29': 'bert.encoder.8.attention.self.value',
            'l_30': 'bert.encoder.8.attention.self.softmax',
            'l_31': 'bert.encoder.8.attention.self.dropout',
            'l_32': 'bert.encoder.8.attention.output.dense',
            'l_33': 'bert.encoder.8.attention.output.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder layers 5(tail)-8(partial) on the stage-1 activations.

        args: flat list of (attention_mask, x0, x1) — the additive attention
        mask, the post-layer-4 hidden state (x0) and layer 5's pre-gelu
        intermediate activation (x1) produced by stage 1.
        Returns [hidden_after_layer_7, layer_8_attention_output_after_dropout].
        """
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # --- BertLayer[5] tail: gelu(intermediate) -> dense -> dropout -> residual -> LN ---
        t_0 = torch.nn.functional.gelu(x1)
        t_0 = self.l_0(t_0)
        t_0 = self.l_1(t_0)
        t_0 = (t_0 + x0)
        t_0 = self.l_2(t_0)
        # --- BertLayer[6] self-attention: Q/K/V projections ---
        t_1 = self.l_3(t_0)
        t_2 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_4 = t_1.size()
        t_5 = t_2.size()
        t_6 = t_3.size()
        # Split hidden dim into 16 heads x 64 dims and move heads to axis 1.
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        # Scaled dot-product scores: Q @ K^T / sqrt(64) + mask -> softmax -> dropout.
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_6(t_4)
        t_4 = self.l_7(t_4)
        # Weighted values, merge heads back to hidden size 1024.
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # BertLayer[6] self-output + residual + LN, then FFN + residual + LN.
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_10(t_0)
        t_4 = self.l_11(t_0)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_12(t_4)
        t_4 = self.l_13(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_14(t_0)
        # --- BertLayer[7]: same attention + FFN pattern as above ---
        t_4 = self.l_15(t_0)
        t_9 = self.l_16(t_0)
        t_5 = self.l_17(t_0)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_2 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_2, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_2 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_1, t_2, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_2 = t_7[1]
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_2, t_1, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_18(t_6)
        t_6 = self.l_19(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_1, t_6)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_22(t_0)
        t_6 = self.l_23(t_0)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_24(t_6)
        t_6 = self.l_25(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_26(t_0)
        # --- BertLayer[8]: attention up to self-output dropout; the residual
        # add + LayerNorm happen in the NEXT stage (Partition3.l_0). ---
        t_6 = self.l_27(t_0)
        t_1 = self.l_28(t_0)
        t_8 = self.l_29(t_0)
        t_7 = t_6.size()
        t_2 = t_1.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_4 = t_2[0]
        t_9 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_4, t_9, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_7, t_2)
        t_7 = math.sqrt(64)
        t_7 = (t_2 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_30(t_7)
        t_7 = self.l_31(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_2 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_2, t_4, t_7)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        # Flatten the pair for transport to the next pipeline stage.
        return list(flatten((t_0, t_7)))

    # The following thin wrappers delegate to module-level helpers that
    # translate between generated names (l_i/p_i/b_i) and original keys.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition3(nn.Module):
    """Pipeline stage 3 of the partitioned BertForQuestionAnswering model.

    Auto-generated stage holding the tail of encoder layer 8 (attention
    residual + LayerNorm and its FFN), encoder layers 9 and 10 in full, and
    layer 11's self-attention up to the merged attention context (before its
    self-output dense).  Inputs come from stage 2; outputs feed stage 4.
    NOTE(review): generated code — statement order and the reuse of the
    t_0..t_9 temporaries are significant; do not reorder by hand.
    """

    # Fully-qualified scope names of the traced sub-modules this stage owns,
    # in execution order; index i becomes attribute `l_{i}` in __init__.
    LAYER_SCOPES = [
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]']
    # Free-standing parameters/buffers assigned to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        """Bind the stage's traced layers/tensors and move them to `device`.

        layers  -- mapping from scope name to the traced nn.Module.
        tensors -- mapping from scope name to a free parameter or buffer.
        device  -- target device for this stage (default 'cuda:3').
        """
        super().__init__()
        # Register each owned layer as attribute l_0, l_1, ... in scope order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Free tensors become p_{i} (parameters) or b_{i} (buffers).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure used by unflatten() to regroup the flat *args in forward:
        # three single-tensor inputs (attention_mask, x0, x1).
        self.input_structure = [1, 1, 1]
        # Maps generated attribute names back to original model state-dict keys.
        self.lookup = {
            'l_0': 'bert.encoder.8.attention.output.LayerNorm',
            'l_1': 'bert.encoder.8.intermediate.dense',
            'l_2': 'bert.encoder.8.output.dense',
            'l_3': 'bert.encoder.8.output.dropout',
            'l_4': 'bert.encoder.8.output.LayerNorm',
            'l_5': 'bert.encoder.9.attention.self.query',
            'l_6': 'bert.encoder.9.attention.self.key',
            'l_7': 'bert.encoder.9.attention.self.value',
            'l_8': 'bert.encoder.9.attention.self.softmax',
            'l_9': 'bert.encoder.9.attention.self.dropout',
            'l_10': 'bert.encoder.9.attention.output.dense',
            'l_11': 'bert.encoder.9.attention.output.dropout',
            'l_12': 'bert.encoder.9.attention.output.LayerNorm',
            'l_13': 'bert.encoder.9.intermediate.dense',
            'l_14': 'bert.encoder.9.output.dense',
            'l_15': 'bert.encoder.9.output.dropout',
            'l_16': 'bert.encoder.9.output.LayerNorm',
            'l_17': 'bert.encoder.10.attention.self.query',
            'l_18': 'bert.encoder.10.attention.self.key',
            'l_19': 'bert.encoder.10.attention.self.value',
            'l_20': 'bert.encoder.10.attention.self.softmax',
            'l_21': 'bert.encoder.10.attention.self.dropout',
            'l_22': 'bert.encoder.10.attention.output.dense',
            'l_23': 'bert.encoder.10.attention.output.dropout',
            'l_24': 'bert.encoder.10.attention.output.LayerNorm',
            'l_25': 'bert.encoder.10.intermediate.dense',
            'l_26': 'bert.encoder.10.output.dense',
            'l_27': 'bert.encoder.10.output.dropout',
            'l_28': 'bert.encoder.10.output.LayerNorm',
            'l_29': 'bert.encoder.11.attention.self.query',
            'l_30': 'bert.encoder.11.attention.self.key',
            'l_31': 'bert.encoder.11.attention.self.value',
            'l_32': 'bert.encoder.11.attention.self.softmax',
            'l_33': 'bert.encoder.11.attention.self.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder layers 8(tail)-11(partial) on the stage-2 activations.

        args: flat list of (attention_mask, x0, x1) — the additive attention
        mask, the post-layer-7 hidden state (x0) and layer 8's attention
        output after dropout (x1) produced by stage 2.
        Returns [hidden_after_layer_10, layer_11_merged_attention_context]
        (the context is handed to stage 4 before layer 11's self-output dense).
        """
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        # --- BertLayer[8] tail: attention residual + LN, then FFN + residual + LN ---
        t_0 = (x1 + x0)
        t_0 = self.l_0(t_0)
        t_1 = self.l_1(t_0)
        t_1 = torch.nn.functional.gelu(t_1)
        t_1 = self.l_2(t_1)
        t_1 = self.l_3(t_1)
        t_0 = (t_1 + t_0)
        t_0 = self.l_4(t_0)
        # --- BertLayer[9] self-attention: Q/K/V projections ---
        t_1 = self.l_5(t_0)
        t_2 = self.l_6(t_0)
        t_3 = self.l_7(t_0)
        t_4 = t_1.size()
        t_5 = t_2.size()
        t_6 = t_3.size()
        # Split hidden dim into 16 heads x 64 dims and move heads to axis 1.
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_1.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_2.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_3.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        # Scaled dot-product scores: Q @ K^T / sqrt(64) + mask -> softmax -> dropout.
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        # Weighted values, merge heads back to hidden size 1024.
        t_6 = torch.matmul(t_4, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        # BertLayer[9] self-output + residual + LN, then FFN + residual + LN.
        t_4 = self.l_10(t_4)
        t_4 = self.l_11(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_12(t_0)
        t_4 = self.l_13(t_0)
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_14(t_4)
        t_4 = self.l_15(t_4)
        t_0 = (t_4 + t_0)
        t_0 = self.l_16(t_0)
        # --- BertLayer[10]: same attention + FFN pattern as above ---
        t_4 = self.l_17(t_0)
        t_9 = self.l_18(t_0)
        t_5 = self.l_19(t_0)
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_3 = t_6[0]
        t_2 = t_6[1]
        t_1 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_3, t_2, t_1, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_1 = t_8[0]
        t_2 = t_8[1]
        t_3 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_1, t_2, t_3, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_3 = t_7[0]
        t_2 = t_7[1]
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_3, t_2, t_1, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_20(t_6)
        t_6 = self.l_21(t_6)
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_1 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_1, t_6)
        t_6 = self.l_22(t_6)
        t_6 = self.l_23(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_24(t_0)
        t_6 = self.l_25(t_0)
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_26(t_6)
        t_6 = self.l_27(t_6)
        t_0 = (t_6 + t_0)
        t_0 = self.l_28(t_0)
        # --- BertLayer[11]: attention scores/context only; the self-output
        # dense starts the NEXT stage (Partition4.l_0). ---
        t_6 = self.l_29(t_0)
        t_1 = self.l_30(t_0)
        t_8 = self.l_31(t_0)
        t_7 = t_6.size()
        t_2 = t_1.size()
        t_3 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_2 = t_2[slice(None, (- 1), None)]
        t_2 = (t_2 + (16, 64))
        t_4 = t_2[0]
        t_9 = t_2[1]
        t_5 = t_2[2]
        t_2 = t_2[3]
        t_2 = t_1.view(t_4, t_9, t_5, t_2)
        t_2 = t_2.permute(0, 2, 1, 3)
        t_3 = t_3[slice(None, (- 1), None)]
        t_3 = (t_3 + (16, 64))
        t_5 = t_3[0]
        t_9 = t_3[1]
        t_4 = t_3[2]
        t_3 = t_3[3]
        t_3 = t_8.view(t_5, t_9, t_4, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_2 = t_2.transpose((- 1), (- 2))
        t_2 = torch.matmul(t_7, t_2)
        t_7 = math.sqrt(64)
        t_7 = (t_2 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_32(t_7)
        t_7 = self.l_33(t_7)
        t_3 = torch.matmul(t_7, t_3)
        t_3 = t_3.permute(0, 2, 1, 3)
        t_3 = t_3.contiguous()
        t_7 = t_3.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_2 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_3.view(t_2, t_4, t_7)
        # Flatten the pair for transport to the next pipeline stage.
        return list(flatten((t_0, t_7)))

    # The following thin wrappers delegate to module-level helpers that
    # translate between generated names (l_i/p_i/b_i) and original keys.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition4(nn.Module):
    """Pipeline stage 4 of a partitioned BertForQuestionAnswering model.

    Owns the tail of encoder layer 11 (attention output projection + FFN),
    all of layers 12 and 13, and the first half of layer 14's self-attention
    (Q/K/V projections through the softmax, ``l_34``).  The attention dropout
    for layer 14 and everything after it live in the next stage.

    forward inputs:  ``(attention_mask, x0, x1)`` (flattened).
    forward outputs: ``(hidden_states, layer14_value_layer,
    layer14_attention_probs)`` (flattened).
    """

    def _build_layer_scopes():
        # Reconstructs, value-for-value, the traced submodule scope strings
        # assigned to this stage (positional modules l_0 .. l_34).
        prefix = ('BertForQuestionAnswering/BertModel[bert]/'
                  'BertEncoder[encoder]/BertLayer[{}]/')
        self_attn = ['BertAttention[attention]/BertSelfAttention[self]/' + leaf
                     for leaf in ('Linear[query]', 'Linear[key]',
                                  'Linear[value]', 'Softmax[softmax]',
                                  'Dropout[dropout]')]
        attn_out = ['BertAttention[attention]/BertSelfOutput[output]/' + leaf
                    for leaf in ('Linear[dense]', 'Dropout[dropout]',
                                 'LayerNorm[LayerNorm]')]
        ffn = ['BertIntermediate[intermediate]/Linear[dense]',
               'BertOutput[output]/Linear[dense]',
               'BertOutput[output]/Dropout[dropout]',
               'BertOutput[output]/LayerNorm[LayerNorm]']
        per_layer = [(11, attn_out + ffn),
                     (12, self_attn + attn_out + ffn),
                     (13, self_attn + attn_out + ffn),
                     (14, self_attn[:4])]
        return [prefix.format(layer) + suffix
                for layer, suffixes in per_layer
                for suffix in suffixes]

    LAYER_SCOPES = _build_layer_scopes()
    del _build_layer_scopes

    # No stage-owned parameters/buffers for this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:4'):
        super().__init__()
        # Attach the traced submodules under positional names l_0 .. l_34.
        for idx, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[scope])
        # Register stage-owned tensors (none here, but keep the protocol).
        n_buffers = n_params = 0
        for scope in self.TENSORS:
            tensor = tensors[scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{n_params}', tensor)
                n_params += 1
            else:
                self.register_buffer(f'b_{n_buffers}', tensor)
                n_buffers += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1]
        # Map positional module names back to the original BERT state-dict
        # paths (same key/value pairs as the generated literal dict).
        qkv = ['attention.self.' + leaf
               for leaf in ('query', 'key', 'value', 'softmax', 'dropout')]
        tail = ['attention.output.dense', 'attention.output.dropout',
                'attention.output.LayerNorm', 'intermediate.dense',
                'output.dense', 'output.dropout', 'output.LayerNorm']
        names = ([f'bert.encoder.11.{s}' for s in tail]
                 + [f'bert.encoder.12.{s}' for s in qkv + tail]
                 + [f'bert.encoder.13.{s}' for s in qkv + tail]
                 + [f'bert.encoder.14.{s}' for s in qkv[:4]])
        self.lookup = {f'l_{i}': name for i, name in enumerate(names)}
        self.to(self.device)

    def forward(self, *args):
        attention_mask, x0, x1 = unflatten(args, self.input_structure)

        def split_heads(t):
            # (B, S, 1024) -> (B, 16, S, 64): 16 heads of size 64.
            return t.view(*(t.size()[:-1] + (16, 64))).permute(0, 2, 1, 3)

        def merge_heads(t):
            # (B, 16, S, 64) -> (B, S, 1024): inverse of split_heads.
            t = t.permute(0, 2, 1, 3).contiguous()
            return t.view(*(t.size()[:-2] + (1024,)))

        gelu = torch.nn.functional.gelu

        # Layer 11 tail: attention output projection + residual + LayerNorm,
        # then the feed-forward block.
        hidden = self.l_2(self.l_1(self.l_0(x1)) + x0)
        hidden = self.l_6(self.l_5(self.l_4(gelu(self.l_3(hidden)))) + hidden)

        # Layer 12: full self-attention + FFN.
        q = split_heads(self.l_7(hidden))
        k = split_heads(self.l_8(hidden))
        v = split_heads(self.l_9(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_11(self.l_10(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_14(self.l_13(self.l_12(ctx)) + hidden)
        hidden = self.l_18(self.l_17(self.l_16(gelu(self.l_15(hidden)))) + hidden)

        # Layer 13: full self-attention + FFN.
        q = split_heads(self.l_19(hidden))
        k = split_heads(self.l_20(hidden))
        v = split_heads(self.l_21(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_23(self.l_22(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_26(self.l_25(self.l_24(ctx)) + hidden)
        hidden = self.l_30(self.l_29(self.l_28(gelu(self.l_27(hidden)))) + hidden)

        # Layer 14 first half: Q/K/V projections and softmax only; the
        # attention dropout and context matmul happen in the next stage.
        q = split_heads(self.l_31(hidden))
        k = split_heads(self.l_32(hidden))
        v = split_heads(self.l_33(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_34(scores + attention_mask)
        return list(flatten((hidden, v, probs)))

    # Delegate serialization and device handling to the module-level
    # pipeline helpers (which understand the partition layout).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition5(nn.Module):
    """Pipeline stage 5 of a partitioned BertForQuestionAnswering model.

    Owns the tail of encoder layer 14 (starting at the attention dropout —
    the attention probs arrive from the previous stage already softmaxed),
    all of layers 15 and 16, and the first part of layer 17's self-attention
    (Q/K/V projections and the scaled raw scores; mask + softmax happen in
    the next stage).

    forward inputs:  ``(attention_mask, x0, x1, x2)`` (flattened), where
    ``x0`` is the running hidden state, ``x1`` the layer-14 value layer and
    ``x2`` the layer-14 attention probabilities.
    forward outputs: ``(hidden_states, layer17_value_layer,
    layer17_scaled_scores)`` (flattened).
    """

    def _build_layer_scopes():
        # Reconstructs, value-for-value, the traced submodule scope strings
        # assigned to this stage (positional modules l_0 .. l_34).
        prefix = ('BertForQuestionAnswering/BertModel[bert]/'
                  'BertEncoder[encoder]/BertLayer[{}]/')
        self_attn = ['BertAttention[attention]/BertSelfAttention[self]/' + leaf
                     for leaf in ('Linear[query]', 'Linear[key]',
                                  'Linear[value]', 'Softmax[softmax]',
                                  'Dropout[dropout]')]
        attn_out = ['BertAttention[attention]/BertSelfOutput[output]/' + leaf
                    for leaf in ('Linear[dense]', 'Dropout[dropout]',
                                 'LayerNorm[LayerNorm]')]
        ffn = ['BertIntermediate[intermediate]/Linear[dense]',
               'BertOutput[output]/Linear[dense]',
               'BertOutput[output]/Dropout[dropout]',
               'BertOutput[output]/LayerNorm[LayerNorm]']
        per_layer = [(14, self_attn[4:] + attn_out + ffn),
                     (15, self_attn + attn_out + ffn),
                     (16, self_attn + attn_out + ffn),
                     (17, self_attn[:3])]
        return [prefix.format(layer) + suffix
                for layer, suffixes in per_layer
                for suffix in suffixes]

    LAYER_SCOPES = _build_layer_scopes()
    del _build_layer_scopes

    # No stage-owned parameters/buffers for this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:5'):
        super().__init__()
        # Attach the traced submodules under positional names l_0 .. l_34.
        for idx, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[scope])
        # Register stage-owned tensors (none here, but keep the protocol).
        n_buffers = n_params = 0
        for scope in self.TENSORS:
            tensor = tensors[scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{n_params}', tensor)
                n_params += 1
            else:
                self.register_buffer(f'b_{n_buffers}', tensor)
                n_buffers += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        # Map positional module names back to the original BERT state-dict
        # paths (same key/value pairs as the generated literal dict).
        qkv = ['attention.self.' + leaf
               for leaf in ('query', 'key', 'value', 'softmax', 'dropout')]
        tail = ['attention.output.dense', 'attention.output.dropout',
                'attention.output.LayerNorm', 'intermediate.dense',
                'output.dense', 'output.dropout', 'output.LayerNorm']
        names = ([f'bert.encoder.14.{s}' for s in qkv[4:] + tail]
                 + [f'bert.encoder.15.{s}' for s in qkv + tail]
                 + [f'bert.encoder.16.{s}' for s in qkv + tail]
                 + [f'bert.encoder.17.{s}' for s in qkv[:3]])
        self.lookup = {f'l_{i}': name for i, name in enumerate(names)}
        self.to(self.device)

    def forward(self, *args):
        attention_mask, x0, x1, x2 = unflatten(args, self.input_structure)

        def split_heads(t):
            # (B, S, 1024) -> (B, 16, S, 64): 16 heads of size 64.
            return t.view(*(t.size()[:-1] + (16, 64))).permute(0, 2, 1, 3)

        def merge_heads(t):
            # (B, 16, S, 64) -> (B, S, 1024): inverse of split_heads.
            t = t.permute(0, 2, 1, 3).contiguous()
            return t.view(*(t.size()[:-2] + (1024,)))

        gelu = torch.nn.functional.gelu

        # Layer 14 tail: attention dropout on the incoming probs, context
        # matmul with the incoming value layer, output projection + FFN.
        ctx = merge_heads(torch.matmul(self.l_0(x2), x1))
        hidden = self.l_3(self.l_2(self.l_1(ctx)) + x0)
        hidden = self.l_7(self.l_6(self.l_5(gelu(self.l_4(hidden)))) + hidden)

        # Layer 15: full self-attention + FFN.
        q = split_heads(self.l_8(hidden))
        k = split_heads(self.l_9(hidden))
        v = split_heads(self.l_10(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_12(self.l_11(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_15(self.l_14(self.l_13(ctx)) + hidden)
        hidden = self.l_19(self.l_18(self.l_17(gelu(self.l_16(hidden)))) + hidden)

        # Layer 16: full self-attention + FFN.
        q = split_heads(self.l_20(hidden))
        k = split_heads(self.l_21(hidden))
        v = split_heads(self.l_22(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_24(self.l_23(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_27(self.l_26(self.l_25(ctx)) + hidden)
        hidden = self.l_31(self.l_30(self.l_29(gelu(self.l_28(hidden)))) + hidden)

        # Layer 17 first part: Q/K/V projections and scaled raw scores only;
        # the attention mask, softmax, and dropout happen in the next stage.
        q = split_heads(self.l_32(hidden))
        k = split_heads(self.l_33(hidden))
        v = split_heads(self.l_34(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        return list(flatten((hidden, v, scores)))

    # Delegate serialization and device handling to the module-level
    # pipeline helpers (which understand the partition layout).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition6(nn.Module):
    """Pipeline stage 6 of a partitioned BertForQuestionAnswering model.

    Owns the tail of encoder layer 17 (the incoming ``x2`` is the raw scaled
    attention scores — this stage applies the mask, softmax, and dropout),
    all of layers 18 and 19, and the layer-20 Q/K/V projections.

    forward inputs:  ``(attention_mask, x0, x1, x2)`` (flattened), where
    ``x0`` is the running hidden state, ``x1`` the layer-17 value layer and
    ``x2`` the layer-17 scaled scores.
    forward outputs: ``(hidden_states, layer20_query, layer20_key,
    layer20_value)`` (flattened).  Note query/key are head-split AND
    permuted, while value is only reshaped — the next stage permutes it.
    """

    def _build_layer_scopes():
        # Reconstructs, value-for-value, the traced submodule scope strings
        # assigned to this stage (positional modules l_0 .. l_35).
        prefix = ('BertForQuestionAnswering/BertModel[bert]/'
                  'BertEncoder[encoder]/BertLayer[{}]/')
        self_attn = ['BertAttention[attention]/BertSelfAttention[self]/' + leaf
                     for leaf in ('Linear[query]', 'Linear[key]',
                                  'Linear[value]', 'Softmax[softmax]',
                                  'Dropout[dropout]')]
        attn_out = ['BertAttention[attention]/BertSelfOutput[output]/' + leaf
                    for leaf in ('Linear[dense]', 'Dropout[dropout]',
                                 'LayerNorm[LayerNorm]')]
        ffn = ['BertIntermediate[intermediate]/Linear[dense]',
               'BertOutput[output]/Linear[dense]',
               'BertOutput[output]/Dropout[dropout]',
               'BertOutput[output]/LayerNorm[LayerNorm]']
        per_layer = [(17, self_attn[3:] + attn_out + ffn),
                     (18, self_attn + attn_out + ffn),
                     (19, self_attn + attn_out + ffn),
                     (20, self_attn[:3])]
        return [prefix.format(layer) + suffix
                for layer, suffixes in per_layer
                for suffix in suffixes]

    LAYER_SCOPES = _build_layer_scopes()
    del _build_layer_scopes

    # No stage-owned parameters/buffers for this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:6'):
        super().__init__()
        # Attach the traced submodules under positional names l_0 .. l_35.
        for idx, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[scope])
        # Register stage-owned tensors (none here, but keep the protocol).
        n_buffers = n_params = 0
        for scope in self.TENSORS:
            tensor = tensors[scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{n_params}', tensor)
                n_params += 1
            else:
                self.register_buffer(f'b_{n_buffers}', tensor)
                n_buffers += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        # Map positional module names back to the original BERT state-dict
        # paths (same key/value pairs as the generated literal dict).
        qkv = ['attention.self.' + leaf
               for leaf in ('query', 'key', 'value', 'softmax', 'dropout')]
        tail = ['attention.output.dense', 'attention.output.dropout',
                'attention.output.LayerNorm', 'intermediate.dense',
                'output.dense', 'output.dropout', 'output.LayerNorm']
        names = ([f'bert.encoder.17.{s}' for s in qkv[3:] + tail]
                 + [f'bert.encoder.18.{s}' for s in qkv + tail]
                 + [f'bert.encoder.19.{s}' for s in qkv + tail]
                 + [f'bert.encoder.20.{s}' for s in qkv[:3]])
        self.lookup = {f'l_{i}': name for i, name in enumerate(names)}
        self.to(self.device)

    def forward(self, *args):
        attention_mask, x0, x1, x2 = unflatten(args, self.input_structure)

        def split_heads(t):
            # (B, S, 1024) -> (B, 16, S, 64): 16 heads of size 64.
            return t.view(*(t.size()[:-1] + (16, 64))).permute(0, 2, 1, 3)

        def merge_heads(t):
            # (B, 16, S, 64) -> (B, S, 1024): inverse of split_heads.
            t = t.permute(0, 2, 1, 3).contiguous()
            return t.view(*(t.size()[:-2] + (1024,)))

        gelu = torch.nn.functional.gelu

        # Layer 17 tail: mask + softmax + dropout on the incoming raw scores,
        # context matmul with the incoming value layer, output projection + FFN.
        probs = self.l_1(self.l_0(x2 + attention_mask))
        ctx = merge_heads(torch.matmul(probs, x1))
        hidden = self.l_4(self.l_3(self.l_2(ctx)) + x0)
        hidden = self.l_8(self.l_7(self.l_6(gelu(self.l_5(hidden)))) + hidden)

        # Layer 18: full self-attention + FFN.
        q = split_heads(self.l_9(hidden))
        k = split_heads(self.l_10(hidden))
        v = split_heads(self.l_11(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_13(self.l_12(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_16(self.l_15(self.l_14(ctx)) + hidden)
        hidden = self.l_20(self.l_19(self.l_18(gelu(self.l_17(hidden)))) + hidden)

        # Layer 19: full self-attention + FFN.
        q = split_heads(self.l_21(hidden))
        k = split_heads(self.l_22(hidden))
        v = split_heads(self.l_23(hidden))
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(64)
        probs = self.l_25(self.l_24(scores + attention_mask))
        ctx = merge_heads(torch.matmul(probs, v))
        hidden = self.l_28(self.l_27(self.l_26(ctx)) + hidden)
        hidden = self.l_32(self.l_31(self.l_30(gelu(self.l_29(hidden)))) + hidden)

        # Layer 20 projections only.  Query and key are head-split and
        # permuted; value is deliberately left un-permuted (the original
        # generated code defers that permute to the next stage).
        q = split_heads(self.l_33(hidden))
        k = split_heads(self.l_34(hidden))
        v_proj = self.l_35(hidden)
        v = v_proj.view(*(v_proj.size()[:-1] + (16, 64)))
        return list(flatten((hidden, q, k, v)))

    # Delegate serialization and device handling to the module-level
    # pipeline helpers (which understand the partition layout).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition7(nn.Module):
    """Final stage of the pipeline-partitioned BertForQuestionAnswering model.

    Owns the tail of BertLayer[20]'s self-attention (softmax onward), all of
    BertLayer[21..23], the BertPooler and the qa_outputs head.  The
    constructor pulls the concrete sub-modules out of ``layers`` (a
    scope-name -> module mapping, see ``layerDict``) and registers them as
    attributes ``l_0`` .. ``l_47`` in execution order.
    """

    # Scope names of the layers owned by this stage, in execution order;
    # entry i is registered as attribute `l_{i}` in __init__.
    LAYER_SCOPES = [
        # BertLayer[20] — tail of its self-attention (softmax onward) plus its FFN.
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]',
        # BertLayer[21] — complete layer.
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]',
        # BertLayer[22] — complete layer.
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]',
        # BertLayer[23] — complete layer.
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]',
        'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]',
        # Pooler and QA head.
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]',
        'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]',
        'BertForQuestionAnswering/Linear[qa_outputs]']

    # Free-standing parameters/buffers assigned to this stage (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:7'):
        """Assemble the stage from pre-extracted modules and move it to `device`.

        Parameters:
            layers: mapping from layer scope name to nn.Module (see layerDict).
            tensors: mapping from tensor scope name to parameter/buffer;
                unused here since TENSORS is empty.
            device: target device for the whole stage (default 'cuda:7').
        """
        super().__init__()
        # Register the owned layers as l_0 .. l_47, in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors: parameters as p_i, buffers as b_i.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward() receives 5 flat args: attention_mask, x0, x1, x2, x3.
        self.input_structure = [1, 1, 1, 1, 1]
        # Generated-name -> original-parameter-path table; presumably consumed
        # by the state_dict/load_state_dict helpers defined elsewhere in this
        # file -- TODO confirm against those helpers.
        self.lookup = {'l_0': 'bert.encoder.20.attention.self.softmax',
                       'l_1': 'bert.encoder.20.attention.self.dropout',
                       'l_2': 'bert.encoder.20.attention.output.dense',
                       'l_3': 'bert.encoder.20.attention.output.dropout',
                       'l_4': 'bert.encoder.20.attention.output.LayerNorm',
                       'l_5': 'bert.encoder.20.intermediate.dense',
                       'l_6': 'bert.encoder.20.output.dense',
                       'l_7': 'bert.encoder.20.output.dropout',
                       'l_8': 'bert.encoder.20.output.LayerNorm',
                       'l_9': 'bert.encoder.21.attention.self.query',
                       'l_10': 'bert.encoder.21.attention.self.key',
                       'l_11': 'bert.encoder.21.attention.self.value',
                       'l_12': 'bert.encoder.21.attention.self.softmax',
                       'l_13': 'bert.encoder.21.attention.self.dropout',
                       'l_14': 'bert.encoder.21.attention.output.dense',
                       'l_15': 'bert.encoder.21.attention.output.dropout',
                       'l_16': 'bert.encoder.21.attention.output.LayerNorm',
                       'l_17': 'bert.encoder.21.intermediate.dense',
                       'l_18': 'bert.encoder.21.output.dense',
                       'l_19': 'bert.encoder.21.output.dropout',
                       'l_20': 'bert.encoder.21.output.LayerNorm',
                       'l_21': 'bert.encoder.22.attention.self.query',
                       'l_22': 'bert.encoder.22.attention.self.key',
                       'l_23': 'bert.encoder.22.attention.self.value',
                       'l_24': 'bert.encoder.22.attention.self.softmax',
                       'l_25': 'bert.encoder.22.attention.self.dropout',
                       'l_26': 'bert.encoder.22.attention.output.dense',
                       'l_27': 'bert.encoder.22.attention.output.dropout',
                       'l_28': 'bert.encoder.22.attention.output.LayerNorm',
                       'l_29': 'bert.encoder.22.intermediate.dense',
                       'l_30': 'bert.encoder.22.output.dense',
                       'l_31': 'bert.encoder.22.output.dropout',
                       'l_32': 'bert.encoder.22.output.LayerNorm',
                       'l_33': 'bert.encoder.23.attention.self.query',
                       'l_34': 'bert.encoder.23.attention.self.key',
                       'l_35': 'bert.encoder.23.attention.self.value',
                       'l_36': 'bert.encoder.23.attention.self.softmax',
                       'l_37': 'bert.encoder.23.attention.self.dropout',
                       'l_38': 'bert.encoder.23.attention.output.dense',
                       'l_39': 'bert.encoder.23.attention.output.dropout',
                       'l_40': 'bert.encoder.23.attention.output.LayerNorm',
                       'l_41': 'bert.encoder.23.intermediate.dense',
                       'l_42': 'bert.encoder.23.output.dense',
                       'l_43': 'bert.encoder.23.output.dropout',
                       'l_44': 'bert.encoder.23.output.LayerNorm',
                       'l_45': 'bert.pooler.dense',
                       'l_46': 'bert.pooler.activation',
                       'l_47': 'qa_outputs'}
        self.to(self.device)

    def forward(self, *args):
        """Run BertLayer[20] (from softmax) through BertLayer[23] plus the QA head.

        Flat inputs (unflattened per self.input_structure):
            attention_mask: additive mask; added to the raw attention scores
                before every softmax in this stage.
            x0: residual hidden states entering BertLayer[20]'s output block.
            x1, x2, x3: BertLayer[20]'s already-projected per-head query, key
                and value tensors from the previous stage (x3 is permuted to
                head-major layout below) -- assumed layout; TODO confirm
                against the producing stage.

        Returns:
            1-tuple holding the qa_outputs logits.
        """
        (attention_mask, x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # ---- BertLayer[20]: finish the self-attention started upstream ----
        t_0 = x3.permute(0, 2, 1, 3)
        t_1 = x2.transpose((- 1), (- 2))
        t_1 = torch.matmul(x1, t_1)
        t_2 = math.sqrt(64)  # scale by sqrt(head_dim)
        t_2 = (t_1 / t_2)
        t_2 = (t_2 + attention_mask)
        t_2 = self.l_0(t_2)  # softmax
        t_2 = self.l_1(t_2)  # attention-probability dropout
        t_0 = torch.matmul(t_2, t_0)
        # Merge heads: permute back and flatten the last two dims to 1024.
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_2 = t_0.size()
        t_2 = t_2[slice(None, (- 2), None)]
        t_2 = (t_2 + (1024,))
        t_1 = t_2[0]
        t_3 = t_2[1]
        t_2 = t_2[2]
        t_2 = t_0.view(t_1, t_3, t_2)
        t_2 = self.l_2(t_2)   # attention output dense
        t_2 = self.l_3(t_2)   # attention output dropout
        t_2 = (t_2 + x0)      # residual connection
        t_2 = self.l_4(t_2)   # attention output LayerNorm
        t_3 = self.l_5(t_2)   # intermediate dense
        t_3 = torch.nn.functional.gelu(t_3)
        t_3 = self.l_6(t_3)   # FFN output dense
        t_3 = self.l_7(t_3)   # FFN output dropout
        t_2 = (t_3 + t_2)     # residual connection
        t_2 = self.l_8(t_2)   # LayerNorm -> hidden states into layer 21
        # ---- BertLayer[21] ----
        t_3 = self.l_9(t_2)   # query projection
        t_1 = self.l_10(t_2)  # key projection
        t_0 = self.l_11(t_2)  # value projection
        t_4 = t_3.size()
        t_5 = t_1.size()
        t_6 = t_0.size()
        # Split heads: reshape last dim to (16, 64), then move heads to dim 1.
        t_4 = t_4[slice(None, (- 1), None)]
        t_4 = (t_4 + (16, 64))
        t_7 = t_4[0]
        t_8 = t_4[1]
        t_9 = t_4[2]
        t_4 = t_4[3]
        t_4 = t_3.view(t_7, t_8, t_9, t_4)
        t_4 = t_4.permute(0, 2, 1, 3)
        t_5 = t_5[slice(None, (- 1), None)]
        t_5 = (t_5 + (16, 64))
        t_9 = t_5[0]
        t_8 = t_5[1]
        t_7 = t_5[2]
        t_5 = t_5[3]
        t_5 = t_1.view(t_9, t_8, t_7, t_5)
        t_5 = t_5.permute(0, 2, 1, 3)
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_7 = t_6[0]
        t_8 = t_6[1]
        t_9 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_0.view(t_7, t_8, t_9, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        # Scaled dot-product attention.
        t_5 = t_5.transpose((- 1), (- 2))
        t_5 = torch.matmul(t_4, t_5)
        t_4 = math.sqrt(64)
        t_4 = (t_5 / t_4)
        t_4 = (t_4 + attention_mask)
        t_4 = self.l_12(t_4)  # softmax
        t_4 = self.l_13(t_4)  # dropout
        t_6 = torch.matmul(t_4, t_6)
        # Merge heads back to 1024-wide hidden states.
        t_6 = t_6.permute(0, 2, 1, 3)
        t_6 = t_6.contiguous()
        t_4 = t_6.size()
        t_4 = t_4[slice(None, (- 2), None)]
        t_4 = (t_4 + (1024,))
        t_5 = t_4[0]
        t_9 = t_4[1]
        t_4 = t_4[2]
        t_4 = t_6.view(t_5, t_9, t_4)
        t_4 = self.l_14(t_4)  # attention output dense
        t_4 = self.l_15(t_4)  # attention output dropout
        t_2 = (t_4 + t_2)     # residual
        t_2 = self.l_16(t_2)  # LayerNorm
        t_4 = self.l_17(t_2)  # intermediate dense
        t_4 = torch.nn.functional.gelu(t_4)
        t_4 = self.l_18(t_4)  # FFN output dense
        t_4 = self.l_19(t_4)  # FFN output dropout
        t_2 = (t_4 + t_2)     # residual
        t_2 = self.l_20(t_2)  # LayerNorm -> hidden states into layer 22
        # ---- BertLayer[22] (same structure as layer 21) ----
        t_4 = self.l_21(t_2)  # query
        t_9 = self.l_22(t_2)  # key
        t_5 = self.l_23(t_2)  # value
        t_6 = t_4.size()
        t_8 = t_9.size()
        t_7 = t_5.size()
        t_6 = t_6[slice(None, (- 1), None)]
        t_6 = (t_6 + (16, 64))
        t_0 = t_6[0]
        t_1 = t_6[1]
        t_3 = t_6[2]
        t_6 = t_6[3]
        t_6 = t_4.view(t_0, t_1, t_3, t_6)
        t_6 = t_6.permute(0, 2, 1, 3)
        t_8 = t_8[slice(None, (- 1), None)]
        t_8 = (t_8 + (16, 64))
        t_3 = t_8[0]
        t_1 = t_8[1]
        t_0 = t_8[2]
        t_8 = t_8[3]
        t_8 = t_9.view(t_3, t_1, t_0, t_8)
        t_8 = t_8.permute(0, 2, 1, 3)
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_0 = t_7[0]
        t_1 = t_7[1]
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_5.view(t_0, t_1, t_3, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_8 = t_8.transpose((- 1), (- 2))
        t_8 = torch.matmul(t_6, t_8)
        t_6 = math.sqrt(64)
        t_6 = (t_8 / t_6)
        t_6 = (t_6 + attention_mask)
        t_6 = self.l_24(t_6)  # softmax
        t_6 = self.l_25(t_6)  # dropout
        t_7 = torch.matmul(t_6, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_7 = t_7.contiguous()
        t_6 = t_7.size()
        t_6 = t_6[slice(None, (- 2), None)]
        t_6 = (t_6 + (1024,))
        t_8 = t_6[0]
        t_3 = t_6[1]
        t_6 = t_6[2]
        t_6 = t_7.view(t_8, t_3, t_6)
        t_6 = self.l_26(t_6)  # attention output dense
        t_6 = self.l_27(t_6)  # attention output dropout
        t_2 = (t_6 + t_2)     # residual
        t_2 = self.l_28(t_2)  # LayerNorm
        t_6 = self.l_29(t_2)  # intermediate dense
        t_6 = torch.nn.functional.gelu(t_6)
        t_6 = self.l_30(t_6)  # FFN output dense
        t_6 = self.l_31(t_6)  # FFN output dropout
        t_2 = (t_6 + t_2)     # residual
        t_2 = self.l_32(t_2)  # LayerNorm -> hidden states into layer 23
        # ---- BertLayer[23] (same structure) ----
        t_6 = self.l_33(t_2)  # query
        t_3 = self.l_34(t_2)  # key
        t_8 = self.l_35(t_2)  # value
        t_7 = t_6.size()
        t_1 = t_3.size()
        t_0 = t_8.size()
        t_7 = t_7[slice(None, (- 1), None)]
        t_7 = (t_7 + (16, 64))
        t_5 = t_7[0]
        t_9 = t_7[1]
        t_4 = t_7[2]
        t_7 = t_7[3]
        t_7 = t_6.view(t_5, t_9, t_4, t_7)
        t_7 = t_7.permute(0, 2, 1, 3)
        t_1 = t_1[slice(None, (- 1), None)]
        t_1 = (t_1 + (16, 64))
        t_4 = t_1[0]
        t_9 = t_1[1]
        t_5 = t_1[2]
        t_1 = t_1[3]
        t_1 = t_3.view(t_4, t_9, t_5, t_1)
        t_1 = t_1.permute(0, 2, 1, 3)
        t_0 = t_0[slice(None, (- 1), None)]
        t_0 = (t_0 + (16, 64))
        t_5 = t_0[0]
        t_9 = t_0[1]
        t_4 = t_0[2]
        t_0 = t_0[3]
        t_0 = t_8.view(t_5, t_9, t_4, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_1 = t_1.transpose((- 1), (- 2))
        t_1 = torch.matmul(t_7, t_1)
        t_7 = math.sqrt(64)
        t_7 = (t_1 / t_7)
        t_7 = (t_7 + attention_mask)
        t_7 = self.l_36(t_7)  # softmax
        t_7 = self.l_37(t_7)  # dropout
        t_0 = torch.matmul(t_7, t_0)
        t_0 = t_0.permute(0, 2, 1, 3)
        t_0 = t_0.contiguous()
        t_7 = t_0.size()
        t_7 = t_7[slice(None, (- 2), None)]
        t_7 = (t_7 + (1024,))
        t_1 = t_7[0]
        t_4 = t_7[1]
        t_7 = t_7[2]
        t_7 = t_0.view(t_1, t_4, t_7)
        t_7 = self.l_38(t_7)  # attention output dense
        t_7 = self.l_39(t_7)  # attention output dropout
        t_2 = (t_7 + t_2)     # residual
        t_2 = self.l_40(t_2)  # LayerNorm
        t_7 = self.l_41(t_2)  # intermediate dense
        t_7 = torch.nn.functional.gelu(t_7)
        t_7 = self.l_42(t_7)  # FFN output dense
        t_7 = self.l_43(t_7)  # FFN output dropout
        t_2 = (t_7 + t_2)     # residual
        t_2 = self.l_44(t_2)  # final encoder hidden states
        # ---- Heads ----
        t_7 = self.l_47(t_2)  # qa_outputs logits
        # Pooler branch: runs on the position-0 ([CLS]) token, but its result
        # is discarded — only the QA logits are returned, matching the
        # pipeline's declared model_outputs.
        t_2 = t_2[(slice(None, None, None), 0)]
        t_2 = self.l_45(t_2)  # pooler dense
        t_2 = self.l_46(t_2)  # pooler Tanh
        return (t_7,)

    # The remaining methods route through the module-level helper
    # implementations shared by all generated partitions.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module], ...]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
    """Iterate over a model's layers, yielding ``(layer, layer_scope, encasing_module)``
    (plus a terminal flag when ``full`` is True).

    Parameters:
    -----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go; children reached at depth 0
        are yielded as leaves
    prefix:
        scope-name prefix; defaults to the root module's class name
    basic_blocks:
        module types that, if encountered, will not be broken down further
    full:
        whether to yield only leaf layers (per depth/basic_blocks) or to
        also yield container layers, with a fourth element marking whether
        the entry is a leaf (True) or will be recursed into (False)
    """
    if (prefix is None):
        prefix = type(module).__name__
    # Fix: ``basic_blocks`` is loop-invariant, so build the isinstance()
    # tuple once instead of once per child.
    blocks = tuple(basic_blocks)
    for (name, sub_module) in module.named_children():
        scope = f'{prefix}/{type(sub_module).__name__}[{name}]'
        # A child is a leaf when it has no children of its own, is a declared
        # basic block, or the depth budget is exhausted.  Fix: probe
        # emptiness with next() instead of materializing the full child list.
        is_leaf = ((next(sub_module.children(), None) is None)
                   or isinstance(sub_module, blocks)
                   or (depth == 0))
        if is_leaf:
            if full:
                (yield (sub_module, scope, module, True))
            else:
                (yield (sub_module, scope, module))
        else:
            if full:
                (yield (sub_module, scope, module, False))
            (yield from traverse_model(sub_module, (depth - 1), scope, blocks, full))
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Return a mapping from layer scope name to the layer module itself,
    collected by traversing ``model`` down to ``depth`` (see traverse_model)."""
    scope_to_layer = {}
    for (layer, scope, _parent) in traverse_model(model, depth, basic_blocks=basic_blocks):
        scope_to_layer[scope] = layer
    return scope_to_layer
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Iterate over a model's parameters and buffers, yielding ``(obj, obj_scope)``.

    Parameters:
    -----------
    module:
        the model to iterate over
    prefix:
        scope-name prefix; defaults to the root module's class name
    """
    scope_root = type(module).__name__ if prefix is None else prefix
    # Direct parameters first, then direct buffers, then recurse into children
    # (matching nn.Module's own non-recursive iteration order).
    for (p_name, p) in module.named_parameters(recurse=False):
        yield (p, f'{scope_root}/{type(p).__name__}[{p_name}]')
    for (b_name, b) in module.named_buffers(recurse=False):
        yield (b, f'{scope_root}/{type(b).__name__}[{b_name}]')
    for (child_name, child) in module.named_children():
        child_prefix = f'{scope_root}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_prefix)
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Return an OrderedDict mapping parameter/buffer scope names to their
    tensors, in traversal order (see traverse_params_buffs)."""
    scope_to_tensor = collections.OrderedDict()
    for (tensor, scope) in traverse_params_buffs(model):
        scope_to_tensor[scope] = tensor
    return scope_to_tensor
def move_tensors(ts, device):
    """Return ``ts`` with every tensor/module inside the (possibly nested)
    structure moved to ``device``; non-tensor leaves pass through unchanged."""
    def _relocate(obj):
        # Only nn.Module / Tensor support .to(); anything else is left as-is.
        if isinstance(obj, (nn.Module, Tensor)):
            return obj.to(device)
        return obj
    return nested_map(_relocate, ts)