import collections
from itertools import chain
from typing import Dict, Iterator, Optional, OrderedDict, Tuple, Type

import torch
from torch import Tensor, nn
from torch.nn import Dropout, Embedding, Linear

# T5Block and T5LayerNorm ship with HuggingFace transformers (in recent
# releases under transformers.models.t5.modeling_t5); StatelessEmbedding is
# provided by the surrounding partitioning codebase and must be imported
# from there.
from transformers.models.t5.modeling_t5 import T5Block, T5LayerNorm


def move_tensors(ts, device):
    def move(t):
        if isinstance(t, (nn.Module, Tensor)):
            return t.to(device)
        return t

    return nested_map(move, ts)
def nested_map(func, ts, full=False):
    if isinstance(ts, torch.Size):
        return func(ts)
    elif isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, t, full=full) for t in ts)
    elif isinstance(ts, dict):
        return {k: nested_map(func, v, full=full) for k, v in ts.items()}
    elif isinstance(ts, slice) and full:
        start = nested_map(func, ts.start, full=full)
        stop = nested_map(func, ts.stop, full=full)
        step = nested_map(func, ts.step, full=full)
        return slice(start, stop, step)
    return func(ts)
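# Illustrative sketch (not part of the generated module): nested_map applies
# `func` to the leaves of arbitrarily nested containers; with full=True it
# also maps over slice endpoints.
def _example_nested_map():
    doubled = nested_map(lambda x: x * 2, {'a': [1, 2], 'b': (3,)})
    assert doubled == {'a': [2, 4], 'b': (6,)}
    s = nested_map(lambda x: None if x is None else x * 2, slice(1, 5, None), full=True)
    assert s == slice(2, 10, None)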
def flatten(ts):
    if isinstance(ts, torch.Size):
        # torch.Size is a leaf: yield it whole instead of iterating it.
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        yield from chain(*[flatten(t) for t in ts])
    elif isinstance(ts, dict):
        # Sort by key so that flatten and unflatten agree on the ordering.
        yield from chain(*[flatten(t) for _, t in sorted(ts.items(), key=lambda t: t[0])])
    else:
        yield ts
def unflatten(xs, structure):
    return _unflatten(xs, structure)[0]
def _unflatten(xs, structure):
    if isinstance(structure, torch.Size):
        return xs[0], 1
    if not isinstance(structure, (list, tuple, set, dict)):
        return xs[0], 1
    if isinstance(structure, (list, tuple, set)):
        offset = 0
        elements = []
        for s in structure:
            e, n = _unflatten(xs[offset:], s)
            elements.append(e)
            offset += n
        return type(structure)(elements), offset
    assert isinstance(structure, dict)
    offset = 0
    elements = dict()
    for k, v in sorted(structure.items(), key=lambda t: t[0]):
        e, n = _unflatten(xs[offset:], v)
        elements[k] = e
        offset += n
    return elements, offset
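# Illustrative sketch (not part of the generated module): flatten and
# unflatten are inverses over nested containers. The Partition classes rely
# on this with `input_structure` lists such as [1, 1, 1], where each
# non-container entry consumes exactly one flat element.
def _example_flatten_roundtrip():
    structure = {'a': [torch.zeros(1), torch.ones(2)], 'b': (torch.Size([2, 3]),)}
    flat = list(flatten(structure))        # leaves in deterministic (key-sorted) order
    rebuilt = unflatten(flat, structure)   # same nesting, same leaf objects
    assert all(t is u for t, u in zip(flat, flatten(rebuilt)))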
def state_dict(partition, *args, **kwargs):
    state = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    result = dict()
    for k, v in state.items():
        if k in lookup:
            result[lookup[k]] = v
        else:
            # Keys look like 'l_3.layer.0...'; remap the 'l_3' prefix back to
            # its original scope, e.g. 'encoder.block.1.layer.0...'.
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            result[new_k] = v
    return result
def load_state_dict(partition, state_dict, strict=True):
    reverse_lookup = {v: k for k, v in partition.lookup.items()}
    device = partition.device
    keys = list(partition.state_dict(None).keys())
    new_state = dict()
    for k in keys:
        if k in reverse_lookup:
            new_state[reverse_lookup[k]] = state_dict[k].to(device)
            continue
        # Map the longest scope prefix (e.g. 'encoder.block.0') back to the
        # flat registered name (e.g. 'l_2'), keeping the parameter suffix.
        prefix = k
        while '.' in prefix:
            prefix = prefix[:prefix.rfind('.')]
            if prefix in reverse_lookup:
                new_state[reverse_lookup[prefix] + k[len(prefix):]] = state_dict[k].to(device)
                break
    nn.Module.load_state_dict(partition, new_state, strict=strict)
def named_buffers(partition, prefix='', recurse=True):
    buffers = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
    lookup = partition.lookup
    for k, v in buffers:
        if k in lookup:
            yield lookup[k], v
        else:
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            yield new_k, v
def named_parameters(partition, prefix='', recurse=True):
    params = nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse)
    lookup = partition.lookup
    for k, v in params:
        if k in lookup:
            yield lookup[k], v
        else:
            assert '.' in k
            split_idx = k.find('.')
            new_k = lookup[k[:split_idx]] + k[split_idx:]
            yield new_k, v
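# Illustrative sketch (not part of the generated module): each Partition
# registers submodules under flat names ('l_0', 'l_1', ...) and uses `lookup`
# to present the original scoped names externally, so checkpoints keep the
# unpartitioned model's key names.
def _example_lookup_remapping():
    class TinyPartition(nn.Module):
        def __init__(self):
            super().__init__()
            self.add_module('l_0', nn.Linear(4, 4))
            self.lookup = {'l_0': 'encoder.block.4'}

        def state_dict(self, *args, **kwargs):
            return state_dict(self, *args, **kwargs)

    p = TinyPartition()
    assert sorted(p.state_dict().keys()) == ['encoder.block.4.bias', 'encoder.block.4.weight']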
def cpu(partition):
    partition.device = torch.device('cpu')
    return nn.Module.cpu(partition)


def cuda(partition, device=None):
    if device is None:
        device = torch.cuda.current_device()
    partition.device = torch.device(device)
    return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
    # Track the target device on the partition before delegating to nn.Module.to.
    device = None
    if 'device' in kwargs:
        device = kwargs['device']
    elif 'tensor' in kwargs:
        device = kwargs['tensor'].device
    if args:
        if isinstance(args[0], (torch.device, int, str)):
            device = args[0]
        if torch.is_tensor(args[0]):
            device = args[0].device
    if device is not None:
        partition.device = torch.device(device)
    return nn.Module.to(partition, *args, **kwargs)
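# Illustrative sketch (not part of the generated module): the cpu/cuda/to
# wrappers keep `partition.device` in sync with wherever the weights live.
def _example_device_tracking():
    class TinyStage(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(2, 2)
            self.device = torch.device('cpu')

        def to(self, *args, **kwargs):
            return to(self, *args, **kwargs)

    stage = TinyStage()
    stage.to(torch.device('cpu'))
    assert stage.device == torch.device('cpu')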
def layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    return dict(
        model_type='t5_stateless',
        model_name_or_path='t5-3b',
        do_lower_case=False,
        output_past=False,
        stateless_tied=True,
        explicitly_set_dict={
            'return_dict': False,
            'use_cache': False,
            'output_only': True,
            'output_attentions': False,
            'precompute_masks': False,
            'output_hidden_states': False,
        },
        do_resize_token_embedding=True,
    )
def create_pipeline_configuration(DEBUG=False, batch_size=64): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Linear, T5Block, Dropout, StatelessEmbedding, T5LayerNorm), 'model_inputs': {'attention_mask': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 6]}, 'decoder_attention_mask': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [6]}, 'decoder_input_ids': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 6]}, 'input_ids': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [7]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::size_408': {'shape': torch.Size([2]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___79': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___81': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___79': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___81': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___139': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___141': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' 
if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___139': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___141': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___259': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___261': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___259': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___261': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___319': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 
'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___321': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___319': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___321': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___364': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___366': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([64, 64]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::size_408': {'shape': torch.Size([2]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'created_by': 0}, 'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___364': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___366': {'shape': torch.Size([64, 32, 64, 64]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___577': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 
'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___579': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___581': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'labels': {'shape': torch.Size([64, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([64, 64, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440': {'shape': torch.Size([64, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448': {'shape': torch.Size([64, 1, 1, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___577': {'shape': torch.Size([64, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___579': {'shape': torch.Size([64, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___581': {'shape': torch.Size([64, 32, 4, 64]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
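# Illustrative sketch (not part of the generated module): the fixup loops at
# the end of create_pipeline_configuration rewrite the batch dimension of
# every 'is_batched' shape, replacing the traced batch size with the
# requested one via a slice-and-splice on torch.Size.
def _example_with_batch_size():
    def with_batch_size(shape, batch_dim, batch_size):
        return torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])

    assert with_batch_size(torch.Size([64, 64]), 0, 32) == torch.Size([32, 64])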
class Partition0(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]'] TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.block.0', 'l_3': 'encoder.block.1', 'l_4': 'encoder.block.2', 'l_5': 'encoder.block.3', 'p_0': 'shared_embed_weight'} self.to(self.device) def forward(self, *args): (attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure) t_0 = decoder_input_ids.size() t_1 = input_ids.size() t_1 = t_1[(- 1)] t_1 = input_ids.view((- 1), t_1) t_1 = self.l_0(self.p_0, t_1) t_1 = self.l_1(t_1) t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))] t_2 = t_2.to(dtype=torch.float32) t_2 = (1.0 - t_2) t_2 = (t_2 * (- 10000.0)) t_1 = self.l_2(t_1, attention_mask=t_2, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_3 = t_1[slice(None, 2, None)] t_3 = t_3[0] t_1 = t_1[2] t_1 = self.l_3(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_3 = t_1[slice(None, 2, None)] t_3 = t_3[0] t_1 = t_1[2] t_1 = self.l_4(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_3 = t_1[slice(None, 2, None)] t_3 = t_3[0] t_1 = t_1[2] t_1 = self.l_5(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_3 = t_1[slice(None, 2, None)] t_3 = t_3[0] t_1 = t_1[2] return list(flatten((t_0, self.p_0, t_2, t_3, t_1))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition1(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.4', 'l_1': 'encoder.block.5', 'l_2': 'encoder.block.6', 'l_3': 'encoder.block.7'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] return list(flatten((x0, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.8', 'l_1': 'encoder.block.9', 'l_2': 'encoder.block.10', 'l_3': 'encoder.block.11'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] return list(flatten((x0, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition3(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.12', 'l_1': 'encoder.block.13', 'l_2': 'encoder.block.14', 'l_3': 'encoder.block.15'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] return list(flatten((x0, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition4(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.16', 'l_1': 'encoder.block.17', 'l_2': 'encoder.block.18', 'l_3': 'encoder.block.19'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] return list(flatten((x0, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition5(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.20', 'l_1': 'encoder.block.21', 'l_2': 'encoder.block.22'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0[2] return list(flatten((x0, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition6(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1, 1, 1, 1] self.lookup = {'l_0': 'encoder.block.23', 'l_1': 'encoder.final_layer_norm', 'l_2': 'encoder.dropout', 'l_3': 'decoder.embed_tokens', 'l_4': 'decoder.dropout', 'l_5': 'decoder.block.0', 'l_6': 'decoder.block.1', 'l_7': 'decoder.block.2', 'l_8': 'decoder.block.3', 'l_9': 'decoder.block.4', 'l_10': 'decoder.block.5', 'l_11': 'decoder.block.6', 'l_12': 'decoder.block.7', 'l_13': 'decoder.block.8', 'l_14': 'decoder.block.9'} self.to(self.device) def forward(self, *args): (attention_mask, decoder_attention_mask, decoder_input_ids, x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = self.l_0(x3, attention_mask=x2, position_bias=x4, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_1 = self.l_1(t_1) t_0 = t_0[2] t_1 = self.l_2(t_1) t_2 = x0[(- 1)] t_2 = decoder_input_ids.view((- 1), t_2) t_2 = self.l_3(x1, t_2) t_2 = self.l_4(t_2) t_3 = x0[0] t_4 = x0[1] t_5 = torch.arange(t_4, device=self.device) t_6 = t_5[(None, None, slice(None, None, None))] t_4 = t_6.repeat(t_3, t_4, 1) t_5 = t_5[(None, slice(None, None, None), None)] t_5 = (t_4 <= t_5) t_4 = decoder_attention_mask.dtype t_4 = t_5.to(t_4) t_4 = t_4[(slice(None, None, None), None, slice(None, None, None), slice(None, None, None))] t_5 = decoder_attention_mask[(slice(None, None, None), None, None, slice(None, None, None))] t_5 = (t_4 * t_5) t_5 = t_5.to(dtype=torch.float32) t_5 = (1.0 - t_5) t_5 = (t_5 * (- 10000.0)) t_4 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))] t_4 = t_4.to(dtype=torch.float32) t_4 = (1.0 - t_4) t_4 = (t_4 * (- 1000000000.0)) t_2 = self.l_5(t_2, attention_mask=t_5, position_bias=None, encoder_attention_mask=t_4, encoder_decoder_position_bias=None, 
layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_6 = t_2[2] t_2 = t_2[3] t_2 = self.l_6(t_3, attention_mask=t_5, position_bias=t_6, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_3 = t_2[2] t_2 = t_2[3] t_2 = self.l_7(t_6, attention_mask=t_5, position_bias=t_3, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_6 = t_2[2] t_2 = t_2[3] t_2 = self.l_8(t_3, attention_mask=t_5, position_bias=t_6, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_3 = t_2[2] t_2 = t_2[3] t_2 = self.l_9(t_6, attention_mask=t_5, position_bias=t_3, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_6 = t_2[2] t_2 = t_2[3] t_2 = self.l_10(t_3, attention_mask=t_5, position_bias=t_6, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_3 = t_2[2] t_2 = t_2[3] t_2 = self.l_11(t_6, attention_mask=t_5, position_bias=t_3, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_6 = t_2[2] t_2 = t_2[3] t_2 = self.l_12(t_3, attention_mask=t_5, position_bias=t_6, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_3 = t_2[2] t_2 = t_2[3] t_2 = self.l_13(t_6, attention_mask=t_5, position_bias=t_3, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_6 = t_2[2] t_2 = t_2[3] t_2 = self.l_14(t_3, attention_mask=t_5, position_bias=t_6, encoder_attention_mask=t_4, encoder_decoder_position_bias=t_2, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_1) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_3 = t_2[2] t_2 = t_2[3] return list(flatten((t_1, t_5, t_4, t_6, t_3, t_2))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def 
named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition7(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.10', 'l_1': 'decoder.block.11', 'l_2': 'decoder.block.12', 'l_3': 'decoder.block.13', 'l_4': 'decoder.block.14', 'l_5': 'decoder.block.15', 'l_6': 'decoder.block.16', 'l_7': 'decoder.block.17', 'l_8': 'decoder.block.18', 'l_9': 'decoder.block.19', 'l_10': 'decoder.block.20', 'l_11': 'decoder.block.21', 'l_12': 'decoder.block.22', 'l_13': 'decoder.block.23', 'l_14': 'decoder.final_layer_norm', 'l_15': 'decoder.dropout', 'l_16': 'lm_head'} self.to(self.device) def forward(self, *args): (labels, x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure) t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_3(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, 
encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_4(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_5(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_6(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_7(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_8(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_9(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_10(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_11(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_0[2] t_0 = t_0[3] t_0 = self.l_12(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_1 = t_0[slice(None, 2, None)] t_1 = t_1[0] t_2 = t_0[2] t_0 = t_0[3] t_0 = self.l_13(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0) t_2 = t_0[slice(None, 2, None)] t_2 = t_2[0] t_2 = self.l_14(t_2) t_1 = t_0[2] t_0 = t_0[3] t_2 = 
self.l_15(t_2) t_2 = (t_2 * 0.03125) t_2 = self.l_16(t_2) t_3 = t_2.size((- 1)) t_3 = t_2.view((- 1), t_3) t_2 = labels.view((- 1)) t_2 = torch.nn.functional.cross_entropy(t_3, t_2, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean') return (t_2,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str] = None,
                   basic_blocks: Tuple[Type[nn.Module], ...] = (),
                   full: bool = False) -> Iterator[Tuple[nn.Module, str, nn.Module, Optional[bool]]]:
    """Iterate over model layers, yielding (layer, layer_scope, encasing_module).

    Parameters
    ----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go
    basic_blocks:
        a list of module types that, if encountered, will not be broken down
    full:
        whether to yield only layers selected by the depth and basic_blocks
        options, or to yield all layers
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, sub_module in module.named_children():
        scope = prefix + '/' + type(sub_module).__name__ + f'[{name}]'
        if (len(list(sub_module.children())) == 0
                or isinstance(sub_module, tuple(basic_blocks))
                or depth == 0):
            if full:
                yield sub_module, scope, module, True
            else:
                yield sub_module, scope, module
        else:
            if full:
                yield sub_module, scope, module, False
            yield from traverse_model(sub_module, depth - 1, scope, basic_blocks, full)
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[str, nn.Module]:
    return {s: l for l, s, _ in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str] = None) -> Iterator[Tuple[Tensor, str]]:
    """Iterate over the model's buffers and parameters, yielding (obj, obj_scope).

    Parameters
    ----------
    module:
        the model to iterate over
    """
    if prefix is None:
        prefix = type(module).__name__
    for param_name, param in module.named_parameters(recurse=False):
        param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
        yield param, param_scope
    for buffer_name, buffer in module.named_buffers(recurse=False):
        buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
        yield buffer, buffer_scope
    for name, sub_module in module.named_children():
        yield from traverse_params_buffs(sub_module, prefix + '/' + type(sub_module).__name__ + f'[{name}]')
def tensorDict(model: nn.Module) -> OrderedDict[str, Tensor]:
    return collections.OrderedDict((s, t) for t, s in traverse_params_buffs(model))
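# Illustrative sketch (not part of the generated module): layerDict and
# tensorDict produce exactly the scope-to-object mappings that the Partition
# constructors expect. `model` is assumed to be a T5ForConditionalGeneration
# already prepared by the surrounding codebase (StatelessEmbedding and tied
# lm-head rewrites included).
def _example_build_stage0(model):
    config = create_pipeline_configuration(DEBUG=True, batch_size=4)
    layers = layerDict(model, depth=config['depth'], basic_blocks=config['basic_blocks'])
    tensors = tensorDict(model)
    stage = config['stages'][0]
    return stage['stage_cls'](layers, tensors, device=stage['devices'][0])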
def layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream():
    return dict(
        model_type='t5_stateless',
        model_name_or_path='t5-3b',
        do_lower_case=False,
        output_past=False,
        stateless_tied=True,
        explicitly_set_dict={
            'return_dict': False,
            'use_cache': False,
            'output_only': True,
            'output_attentions': False,
            'precompute_masks': False,
            'output_hidden_states': False,
        },
        do_resize_token_embedding=True,
    )
def create_pipeline_configuration(DEBUG=False, batch_size=8): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (T5LayerNorm, Linear, Dropout, StatelessEmbedding, Embedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': 
torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': 
torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([8, 320, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: 
{'stage_cls': Partition9, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': 
torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([8, 8, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([8, 8, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([8, 
32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([8, 320, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
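# Instantiation sketch (illustrative; the `layers` and `tensors` mappings are
# assumed to come from tracing the full T5ForConditionalGeneration model,
# which happens outside this file). Each stage entry carries the partition
# class, its target device, and the I/O metadata used to wire the pipeline:
#
#     config = create_pipeline_configuration(DEBUG=True, batch_size=4)
#     stages = {}
#     for stage_id, stage_cfg in config['stages'].items():
#         stages[stage_id] = stage_cfg['stage_cls'](layers, tensors,
#                                                   device=stage_cfg['devices'][0])
#
# Note that the two loops at the end of create_pipeline_configuration only
# rescale the batch dimension of every recorded 'shape'; the sequence lengths
# (320 for the encoder, 8 for the decoder) are fixed at partitioning time.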
class Partition0(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Embedding[relative_attention_bias]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]'] TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]'] def __init__(self, layers, tensors, device='cuda:0'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.block.0.layer.0.layer_norm', 'l_3': 'encoder.block.0.layer.0.SelfAttention.q', 'l_4': 'encoder.block.0.layer.0.SelfAttention.k', 'l_5': 'encoder.block.0.layer.0.SelfAttention.v', 'l_6': 'encoder.block.0.layer.0.SelfAttention.relative_attention_bias', 'l_7': 'encoder.block.0.layer.0.SelfAttention.o', 'l_8': 'encoder.block.0.layer.0.dropout', 'l_9': 'encoder.block.0.layer.1.layer_norm', 'l_10': 'encoder.block.0.layer.1.DenseReluDense.wi', 'l_11': 'encoder.block.0.layer.1.DenseReluDense.dropout', 'l_12': 'encoder.block.0.layer.1.DenseReluDense.wo', 'l_13': 'encoder.block.0.layer.1.dropout', 'l_14': 'encoder.block.1.layer.0.layer_norm', 'l_15': 'encoder.block.1.layer.0.SelfAttention.q', 'l_16': 'encoder.block.1.layer.0.SelfAttention.k', 'l_17': 'encoder.block.1.layer.0.SelfAttention.v', 'l_18': 'encoder.block.1.layer.0.SelfAttention.o', 'l_19': 'encoder.block.1.layer.0.dropout', 'l_20': 'encoder.block.1.layer.1.layer_norm', 'l_21': 'encoder.block.1.layer.1.DenseReluDense.wi', 'l_22': 'encoder.block.1.layer.1.DenseReluDense.dropout', 'l_23': 
'encoder.block.1.layer.1.DenseReluDense.wo', 'l_24': 'encoder.block.1.layer.1.dropout', 'l_25': 'encoder.block.2.layer.0.layer_norm', 'l_26': 'encoder.block.2.layer.0.SelfAttention.q', 'l_27': 'encoder.block.2.layer.0.SelfAttention.k', 'l_28': 'encoder.block.2.layer.0.SelfAttention.v', 'l_29': 'encoder.block.2.layer.0.SelfAttention.o', 'l_30': 'encoder.block.2.layer.0.dropout', 'l_31': 'encoder.block.2.layer.1.layer_norm', 'l_32': 'encoder.block.2.layer.1.DenseReluDense.wi', 'l_33': 'encoder.block.2.layer.1.DenseReluDense.dropout', 'l_34': 'encoder.block.2.layer.1.DenseReluDense.wo', 'l_35': 'encoder.block.2.layer.1.dropout', 'l_36': 'decoder.embed_tokens', 'l_37': 'decoder.dropout', 'p_0': 'shared_embed_weight'} self.to(self.device) def forward(self, *args): (attention_mask, decoder_attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure) t_0 = decoder_input_ids.size() t_1 = input_ids.size() t_1 = t_1[(- 1)] t_1 = input_ids.view((- 1), t_1) t_1 = self.l_0(self.p_0, t_1) t_1 = self.l_1(t_1) t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))] t_2 = t_2.to(dtype=torch.float32) t_2 = (1.0 - t_2) t_2 = (t_2 * (- 10000.0)) t_3 = self.l_2(t_1) t_4 = self.l_3(t_3) t_5 = self.l_4(t_3) t_6 = self.l_5(t_3) t_3 = t_3.shape t_3 = t_3[slice(None, 2, None)] t_7 = t_3[0] t_3 = t_3[1] t_4 = t_4.view(t_7, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_7, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_7, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_4, t_5) t_4 = torch.arange(t_3, dtype=torch.int64, device=self.device) t_4 = t_4[(slice(None, None, None), None)] t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device) t_3 = t_3[(None, slice(None, None, None))] t_4 = (t_3 - t_4) t_3 = torch.abs(t_4) t_4 = (t_4 > 0) t_4 = t_4.to(torch.int64) t_4 = (t_4 * 16) t_4 = (0 + t_4) t_8 = t_3.float() t_9 = (t_3 < 8) t_8 = (t_8 / 8) t_8 = torch.log(t_8) t_10 = math.log(16.0) t_10 = (t_8 / t_10) t_10 = (t_10 * 8) t_10 = t_10.to(torch.int64) t_10 = (8 + t_10) t_8 = torch.full_like(t_10, 15, device=self.device) t_8 = torch.min(t_10, t_8) t_8 = torch.where(t_9, t_3, t_8) t_4 += t_8 t_8 = t_4 t_8 = t_8.to(self.device) t_8 = self.l_6(t_8) t_8 = t_8.permute([2, 0, 1]) t_8 = t_8.unsqueeze(0) t_2 = (t_8 + t_2) t_5 += t_2 t_8 = t_5.float() t_8 = torch.nn.functional.softmax(t_8, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_8.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_7 = t_6.view(t_7, (- 1), 4096) t_7 = self.l_7(t_7) t_6 = self.l_8(t_7) t_6 = (t_1 + t_6) t_2 = (t_7, None, t_2) t_7 = t_2[0] t_6 = (t_6,) t_2 = t_2[slice(1, None, None)] t_2 = (t_6 + t_2) t_6 = t_2[slice(None, 2, None)] t_1 = t_6[0] t_5 = self.l_9(t_1) t_6 = t_6[1] t_2 = t_2[slice(2, None, None)] t_5 = self.l_10(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_11(t_5) t_5 = self.l_12(t_5) t_5 = self.l_13(t_5) t_5 = (t_1 + t_5) t_6 = (t_5, t_6) t_2 = (t_6 + t_2) t_6 = t_2[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_14(t_6) t_2 = t_2[2] t_1 = self.l_15(t_5) t_8 = self.l_16(t_5) t_4 = self.l_17(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_8 = t_8.view(t_5, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_4 = t_4.view(t_5, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = 
torch.matmul(t_1, t_8) t_8 += t_2 t_1 = t_8.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_1.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_4 = torch.matmul(t_8, t_4) t_4 = t_4.transpose(1, 2) t_4 = t_4.contiguous() t_5 = t_4.view(t_5, (- 1), 4096) t_5 = self.l_18(t_5) t_4 = self.l_19(t_5) t_4 = (t_6 + t_4) t_2 = (t_5, None, t_2) t_5 = t_2[0] t_4 = (t_4,) t_2 = t_2[slice(1, None, None)] t_2 = (t_4 + t_2) t_4 = t_2[slice(None, 2, None)] t_6 = t_4[0] t_8 = self.l_20(t_6) t_4 = t_4[1] t_2 = t_2[slice(2, None, None)] t_8 = self.l_21(t_8) t_8 = torch.nn.functional.relu(t_8, inplace=False) t_8 = self.l_22(t_8) t_8 = self.l_23(t_8) t_8 = self.l_24(t_8) t_8 = (t_6 + t_8) t_4 = (t_8, t_4) t_2 = (t_4 + t_2) t_4 = t_2[slice(None, 2, None)] t_4 = t_4[0] t_8 = self.l_25(t_4) t_2 = t_2[2] t_6 = self.l_26(t_8) t_1 = self.l_27(t_8) t_3 = self.l_28(t_8) t_8 = t_8.shape t_8 = t_8[slice(None, 2, None)] t_8 = t_8[0] t_6 = t_6.view(t_8, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_1 = t_1.view(t_8, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_3 = t_3.view(t_8, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_6, t_1) t_1 += t_2 t_6 = t_1.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_6.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_1, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_8 = t_3.view(t_8, (- 1), 4096) t_8 = self.l_29(t_8) t_3 = self.l_30(t_8) t_3 = (t_4 + t_3) t_2 = (t_8, None, t_2) t_8 = t_2[0] t_3 = (t_3,) t_2 = t_2[slice(1, None, None)] t_2 = (t_3 + t_2) t_3 = t_2[slice(None, 2, None)] t_4 = t_3[0] t_1 = self.l_31(t_4) t_3 = t_3[1] t_2 = t_2[slice(2, None, None)] t_1 = self.l_32(t_1) t_1 = torch.nn.functional.relu(t_1, inplace=False) t_1 = self.l_33(t_1) t_1 = self.l_34(t_1) t_1 = self.l_35(t_1) t_1 = (t_4 + t_1) t_3 = (t_1, t_3) t_2 = (t_3 + t_2) t_3 = t_2[slice(None, 2, None)] t_3 = t_3[0] t_2 = t_2[2] t_1 = t_0[(- 1)] t_1 = decoder_input_ids.view((- 1), t_1) t_1 = self.l_36(self.p_0, t_1) t_1 = self.l_37(t_1) t_4 = t_0[0] t_0 = t_0[1] t_6 = torch.arange(t_0, device=self.device) t_9 = t_6[(None, None, slice(None, None, None))] t_0 = t_9.repeat(t_4, t_0, 1) t_6 = t_6[(None, slice(None, None, None), None)] t_6 = (t_0 <= t_6) t_0 = decoder_attention_mask.dtype t_0 = t_6.to(t_0) t_0 = t_0[(slice(None, None, None), None, slice(None, None, None), slice(None, None, None))] t_6 = decoder_attention_mask[(slice(None, None, None), None, None, slice(None, None, None))] t_6 = (t_0 * t_6) t_6 = t_6.to(dtype=torch.float32) t_6 = (1.0 - t_6) t_6 = (t_6 * (- 10000.0)) return list(flatten((t_3, t_2, t_1, t_6))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
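# Routing sketch (illustrative, continuing the instantiation sketch above):
# per the configuration, Partition0 returns four flattened tensors — the
# encoder hidden states and position bias consumed by stage 1, plus the
# decoder embedding dropout and extended decoder attention mask held back
# for stage 8:
#
#     s0_out = stages[0](attention_mask, decoder_attention_mask,
#                        decoder_input_ids, input_ids)
#     enc_hidden, enc_bias, dec_hidden, dec_mask = s0_out
#     s1_out = stages[1](enc_hidden, enc_bias)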
class Partition1(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:1'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.3.layer.0.layer_norm', 'l_1': 'encoder.block.3.layer.0.SelfAttention.q', 'l_2': 'encoder.block.3.layer.0.SelfAttention.k', 'l_3': 'encoder.block.3.layer.0.SelfAttention.v', 'l_4': 'encoder.block.3.layer.0.SelfAttention.o', 'l_5': 'encoder.block.3.layer.0.dropout', 'l_6': 'encoder.block.3.layer.1.layer_norm', 'l_7': 'encoder.block.3.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.3.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.3.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.3.layer.1.dropout', 'l_11': 'encoder.block.4.layer.0.layer_norm', 'l_12': 'encoder.block.4.layer.0.SelfAttention.q', 'l_13': 'encoder.block.4.layer.0.SelfAttention.k', 'l_14': 'encoder.block.4.layer.0.SelfAttention.v', 'l_15': 'encoder.block.4.layer.0.SelfAttention.o', 'l_16': 'encoder.block.4.layer.0.dropout', 'l_17': 'encoder.block.4.layer.1.layer_norm', 'l_18': 'encoder.block.4.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.4.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.4.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.4.layer.1.dropout', 'l_22': 'encoder.block.5.layer.0.layer_norm', 'l_23': 'encoder.block.5.layer.0.SelfAttention.q', 'l_24': 'encoder.block.5.layer.0.SelfAttention.k', 'l_25': 'encoder.block.5.layer.0.SelfAttention.v', 'l_26': 'encoder.block.5.layer.0.SelfAttention.o', 'l_27': 'encoder.block.5.layer.0.dropout', 'l_28': 'encoder.block.5.layer.1.layer_norm', 'l_29': 'encoder.block.5.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.5.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.5.layer.1.DenseReluDense.wo', 'l_32': 
'encoder.block.5.layer.1.dropout'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = t_0[slice(1, None, None)] t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_6 = t_7[0] t_1 = self.l_28(t_6) t_7 = t_7[1] t_0 = t_0[slice(2, None, None)] 
t_1 = self.l_29(t_1) t_1 = torch.nn.functional.relu(t_1, inplace=False) t_1 = self.l_30(t_1) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = (t_6 + t_1) t_7 = (t_1, t_7) t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_7 = t_7[0] t_0 = t_0[2] return list(flatten((t_7, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition2(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:2'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.6.layer.0.layer_norm', 'l_1': 'encoder.block.6.layer.0.SelfAttention.q', 'l_2': 'encoder.block.6.layer.0.SelfAttention.k', 'l_3': 'encoder.block.6.layer.0.SelfAttention.v', 'l_4': 'encoder.block.6.layer.0.SelfAttention.o', 'l_5': 'encoder.block.6.layer.0.dropout', 'l_6': 'encoder.block.6.layer.1.layer_norm', 'l_7': 'encoder.block.6.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.6.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.6.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.6.layer.1.dropout', 'l_11': 'encoder.block.7.layer.0.layer_norm', 'l_12': 'encoder.block.7.layer.0.SelfAttention.q', 'l_13': 'encoder.block.7.layer.0.SelfAttention.k', 'l_14': 'encoder.block.7.layer.0.SelfAttention.v', 'l_15': 'encoder.block.7.layer.0.SelfAttention.o', 'l_16': 'encoder.block.7.layer.0.dropout', 'l_17': 'encoder.block.7.layer.1.layer_norm', 'l_18': 'encoder.block.7.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.7.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.7.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.7.layer.1.dropout', 'l_22': 'encoder.block.8.layer.0.layer_norm', 'l_23': 'encoder.block.8.layer.0.SelfAttention.q', 'l_24': 'encoder.block.8.layer.0.SelfAttention.k', 'l_25': 'encoder.block.8.layer.0.SelfAttention.v', 'l_26': 'encoder.block.8.layer.0.SelfAttention.o', 'l_27': 'encoder.block.8.layer.0.dropout', 'l_28': 'encoder.block.8.layer.1.layer_norm', 'l_29': 
'encoder.block.8.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.8.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.8.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.8.layer.1.dropout', 'l_33': 'encoder.block.9.layer.0.layer_norm'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) 
t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = t_0[slice(1, None, None)] t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_6 = t_7[0] t_1 = self.l_28(t_6) t_7 = t_7[1] t_0 = t_0[slice(2, None, None)] t_1 = self.l_29(t_1) t_1 = torch.nn.functional.relu(t_1, inplace=False) t_1 = self.l_30(t_1) t_1 = self.l_31(t_1) t_1 = self.l_32(t_1) t_1 = (t_6 + t_1) t_7 = (t_1, t_7) t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_7 = t_7[0] t_1 = self.l_33(t_7) t_0 = t_0[2] return list(flatten((t_7, t_1, t_0))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
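# Chaining sketch (illustrative): the encoder-only partitions pass
# (hidden_states, position_bias) down the pipeline; Partition2 additionally
# emits the pre-attention layer norm of block 9 so that Partition3 can resume
# mid-block:
#
#     h, b = stages[1](enc_hidden, enc_bias)      # blocks 3-5
#     h, h_norm, b = stages[2](h, b)              # blocks 6-8 + block 9 norm
#     h, b = stages[3](h, h_norm, b)              # rest of block 9, blocks 10-11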
class Partition3(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:3'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1] self.lookup = {'l_0': 'encoder.block.9.layer.0.SelfAttention.q', 'l_1': 'encoder.block.9.layer.0.SelfAttention.k', 'l_2': 'encoder.block.9.layer.0.SelfAttention.v', 'l_3': 'encoder.block.9.layer.0.SelfAttention.o', 'l_4': 'encoder.block.9.layer.0.dropout', 'l_5': 'encoder.block.9.layer.1.layer_norm', 'l_6': 'encoder.block.9.layer.1.DenseReluDense.wi', 'l_7': 'encoder.block.9.layer.1.DenseReluDense.dropout', 'l_8': 'encoder.block.9.layer.1.DenseReluDense.wo', 'l_9': 'encoder.block.9.layer.1.dropout', 'l_10': 'encoder.block.10.layer.0.layer_norm', 'l_11': 'encoder.block.10.layer.0.SelfAttention.q', 'l_12': 'encoder.block.10.layer.0.SelfAttention.k', 'l_13': 'encoder.block.10.layer.0.SelfAttention.v', 'l_14': 'encoder.block.10.layer.0.SelfAttention.o', 'l_15': 'encoder.block.10.layer.0.dropout', 'l_16': 'encoder.block.10.layer.1.layer_norm', 'l_17': 'encoder.block.10.layer.1.DenseReluDense.wi', 'l_18': 'encoder.block.10.layer.1.DenseReluDense.dropout', 'l_19': 'encoder.block.10.layer.1.DenseReluDense.wo', 'l_20': 'encoder.block.10.layer.1.dropout', 'l_21': 'encoder.block.11.layer.0.layer_norm', 'l_22': 'encoder.block.11.layer.0.SelfAttention.q', 'l_23': 'encoder.block.11.layer.0.SelfAttention.k', 'l_24': 'encoder.block.11.layer.0.SelfAttention.v', 'l_25': 'encoder.block.11.layer.0.SelfAttention.o', 'l_26': 'encoder.block.11.layer.0.dropout', 'l_27': 'encoder.block.11.layer.1.layer_norm', 'l_28': 'encoder.block.11.layer.1.DenseReluDense.wi', 'l_29': 'encoder.block.11.layer.1.DenseReluDense.dropout', 'l_30': 'encoder.block.11.layer.1.DenseReluDense.wo', 'l_31': 'encoder.block.11.layer.1.dropout'} self.to(self.device) def forward(self, *args): (x0, x1, x2) = unflatten(args, self.input_structure) t_0 = self.l_0(x1) t_1 = self.l_1(x1) 
t_2 = self.l_2(x1) t_3 = x1.shape t_3 = t_3[slice(None, 2, None)] t_3 = t_3[0] t_0 = t_0.view(t_3, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_3, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_3, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_0, t_1) t_1 += x2 t_0 = t_1.float() t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_0.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_2 = torch.matmul(t_1, t_2) t_2 = t_2.transpose(1, 2) t_2 = t_2.contiguous() t_3 = t_2.view(t_3, (- 1), 4096) t_3 = self.l_3(t_3) t_2 = self.l_4(t_3) t_2 = (x0 + t_2) t_3 = (t_3, None, x2) t_1 = t_3[0] t_2 = (t_2,) t_3 = t_3[slice(1, None, None)] t_3 = (t_2 + t_3) t_2 = t_3[slice(None, 2, None)] t_0 = t_2[0] t_4 = self.l_5(t_0) t_2 = t_2[1] t_3 = t_3[slice(2, None, None)] t_4 = self.l_6(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_7(t_4) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = (t_0 + t_4) t_2 = (t_4, t_2) t_3 = (t_2 + t_3) t_2 = t_3[slice(None, 2, None)] t_2 = t_2[0] t_4 = self.l_10(t_2) t_3 = t_3[2] t_0 = self.l_11(t_4) t_5 = self.l_12(t_4) t_6 = self.l_13(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_0 = t_0.view(t_4, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_0, t_5) t_5 += t_3 t_0 = t_5.float() t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_0.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_14(t_4) t_6 = self.l_15(t_4) t_6 = (t_2 + t_6) t_3 = (t_4, None, t_3) t_4 = t_3[0] t_6 = (t_6,) t_3 = t_3[slice(1, None, None)] t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_2 = t_6[0] t_5 = self.l_16(t_2) t_6 = t_6[1] t_3 = t_3[slice(2, None, None)] t_5 = self.l_17(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_18(t_5) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = (t_2 + t_5) t_6 = (t_5, t_6) t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_21(t_6) t_3 = t_3[2] t_2 = self.l_22(t_5) t_0 = self.l_23(t_5) t_7 = self.l_24(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_2 = t_2.view(t_5, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_0 = t_0.view(t_5, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_2, t_0) t_0 += t_3 t_2 = t_0.float() t_2 = torch.nn.functional.softmax(t_2, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_2.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_0, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_25(t_5) t_7 = self.l_26(t_5) t_7 = (t_6 + t_7) t_3 = (t_5, None, t_3) t_5 = t_3[0] t_7 = (t_7,) t_3 = t_3[slice(1, None, None)] t_3 = (t_7 + t_3) t_7 = t_3[slice(None, 2, None)] t_6 = t_7[0] t_0 = self.l_27(t_6) t_7 = t_7[1] t_3 = t_3[slice(2, None, None)] t_0 = self.l_28(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_29(t_0) t_0 = self.l_30(t_0) t_0 = self.l_31(t_0) t_0 = (t_6 + t_0) t_7 = (t_0, t_7) t_3 = (t_7 + t_3) t_7 = 
t_3[slice(None, 2, None)]
        t_7 = t_7[0]
        t_3 = t_3[2]
        return list(flatten((t_7, t_3)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
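# Illustration (with made-up shapes) of the inter-stage calling convention:
# each partition rebuilds its nested inputs from a flat argument list via
# unflatten(args, self.input_structure) and hands a flat list back through
# list(flatten(...)), so tensors cross stage boundaries as a plain sequence.
def _sketch_stage_boundary():
    import torch
    hidden = torch.randn(2, 10, 1024)            # hypothetical d_model
    position_bias = torch.randn(1, 32, 10, 10)   # hypothetical bias shape
    wire = list(flatten((hidden, position_bias)))  # what leaves a stage
    restored = unflatten(wire, [1, 1])             # what the next stage sees
    assert torch.equal(restored[0], hidden)
    assert torch.equal(restored[1], position_bias)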
class Partition4(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:4'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.12.layer.0.layer_norm', 'l_1': 'encoder.block.12.layer.0.SelfAttention.q', 'l_2': 'encoder.block.12.layer.0.SelfAttention.k', 'l_3': 'encoder.block.12.layer.0.SelfAttention.v', 'l_4': 'encoder.block.12.layer.0.SelfAttention.o', 'l_5': 'encoder.block.12.layer.0.dropout', 'l_6': 'encoder.block.12.layer.1.layer_norm', 'l_7': 'encoder.block.12.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.12.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.12.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.12.layer.1.dropout', 'l_11': 'encoder.block.13.layer.0.layer_norm', 'l_12': 'encoder.block.13.layer.0.SelfAttention.q', 'l_13': 'encoder.block.13.layer.0.SelfAttention.k', 'l_14': 'encoder.block.13.layer.0.SelfAttention.v', 'l_15': 'encoder.block.13.layer.0.SelfAttention.o', 'l_16': 'encoder.block.13.layer.0.dropout', 'l_17': 'encoder.block.13.layer.1.layer_norm', 'l_18': 'encoder.block.13.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.13.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.13.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.13.layer.1.dropout', 'l_22': 'encoder.block.14.layer.0.layer_norm', 'l_23': 'encoder.block.14.layer.0.SelfAttention.q', 'l_24': 'encoder.block.14.layer.0.SelfAttention.k', 'l_25': 'encoder.block.14.layer.0.SelfAttention.v', 'l_26': 'encoder.block.14.layer.0.SelfAttention.o', 'l_27': 'encoder.block.14.layer.0.dropout', 'l_28': 'encoder.block.14.layer.1.layer_norm', 'l_29': 'encoder.block.14.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.14.layer.1.DenseReluDense.dropout', 'l_31': 
'encoder.block.14.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.14.layer.1.dropout'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = t_0[slice(1, None, None)] t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_6 = t_7[0] t_1 = 
self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
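# The feed-forward sublayer that closes every block above (layer_norm -> wi ->
# relu -> dropout -> wo -> dropout -> residual), written out as one module for
# readability. d_model/d_ff below are the published t5-3b values but are an
# assumption here -- the generated code never states them directly, and
# nn.LayerNorm is only a stand-in for T5LayerNorm.
import torch.nn.functional as F

class _T5FFSketch(nn.Module):
    def __init__(self, d_model=1024, d_ff=16384, p=0.1):
        super().__init__()
        self.layer_norm = nn.LayerNorm(d_model)
        self.wi = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(p)

    def forward(self, hidden):
        y = self.layer_norm(hidden)            # e.g. l_28 above
        y = F.relu(self.wi(y), inplace=False)  # l_29 + relu
        y = self.wo(self.dropout(y))           # l_30, l_31
        return hidden + self.dropout(y)        # l_32 + residual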
class Partition5(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:5'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.15.layer.0.layer_norm', 'l_1': 'encoder.block.15.layer.0.SelfAttention.q', 'l_2': 'encoder.block.15.layer.0.SelfAttention.k', 'l_3': 'encoder.block.15.layer.0.SelfAttention.v', 'l_4': 'encoder.block.15.layer.0.SelfAttention.o', 'l_5': 'encoder.block.15.layer.0.dropout', 'l_6': 'encoder.block.15.layer.1.layer_norm', 'l_7': 'encoder.block.15.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.15.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.15.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.15.layer.1.dropout', 'l_11': 'encoder.block.16.layer.0.layer_norm', 'l_12': 'encoder.block.16.layer.0.SelfAttention.q', 'l_13': 'encoder.block.16.layer.0.SelfAttention.k', 'l_14': 'encoder.block.16.layer.0.SelfAttention.v', 'l_15': 'encoder.block.16.layer.0.SelfAttention.o', 'l_16': 'encoder.block.16.layer.0.dropout', 'l_17': 'encoder.block.16.layer.1.layer_norm', 'l_18': 'encoder.block.16.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.16.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.16.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.16.layer.1.dropout', 'l_22': 'encoder.block.17.layer.0.layer_norm', 'l_23': 'encoder.block.17.layer.0.SelfAttention.q', 'l_24': 'encoder.block.17.layer.0.SelfAttention.k', 'l_25': 'encoder.block.17.layer.0.SelfAttention.v', 'l_26': 'encoder.block.17.layer.0.SelfAttention.o', 'l_27': 'encoder.block.17.layer.0.dropout', 'l_28': 'encoder.block.17.layer.1.layer_norm', 'l_29': 'encoder.block.17.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.17.layer.1.DenseReluDense.dropout', 'l_31': 
'encoder.block.17.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.17.layer.1.dropout'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = t_0[slice(1, None, None)] t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_6 = t_7[0] t_1 = 
self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
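# The t.float() / softmax / type_as(t) sequence repeated in every attention
# block keeps the softmax numerically stable when surrounding activations run
# in half precision: scores are upcast to float32 for the reduction, then cast
# back to the working dtype. A standalone sketch:
def _sketch_stable_softmax():
    import torch

    def stable_softmax(scores):
        return torch.nn.functional.softmax(scores.float(), dim=-1).type_as(scores)

    scores = (torch.randn(1, 32, 10, 10) * 50).half()
    probs = stable_softmax(scores)
    assert probs.dtype == torch.float16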
class Partition6(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:6'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.18.layer.0.layer_norm', 'l_1': 'encoder.block.18.layer.0.SelfAttention.q', 'l_2': 'encoder.block.18.layer.0.SelfAttention.k', 'l_3': 'encoder.block.18.layer.0.SelfAttention.v', 'l_4': 'encoder.block.18.layer.0.SelfAttention.o', 'l_5': 'encoder.block.18.layer.0.dropout', 'l_6': 'encoder.block.18.layer.1.layer_norm', 'l_7': 'encoder.block.18.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.18.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.18.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.18.layer.1.dropout', 'l_11': 'encoder.block.19.layer.0.layer_norm', 'l_12': 'encoder.block.19.layer.0.SelfAttention.q', 'l_13': 'encoder.block.19.layer.0.SelfAttention.k', 'l_14': 'encoder.block.19.layer.0.SelfAttention.v', 'l_15': 'encoder.block.19.layer.0.SelfAttention.o', 'l_16': 'encoder.block.19.layer.0.dropout', 'l_17': 'encoder.block.19.layer.1.layer_norm', 'l_18': 'encoder.block.19.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.19.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.19.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.19.layer.1.dropout', 'l_22': 'encoder.block.20.layer.0.layer_norm', 'l_23': 'encoder.block.20.layer.0.SelfAttention.q', 'l_24': 'encoder.block.20.layer.0.SelfAttention.k', 'l_25': 'encoder.block.20.layer.0.SelfAttention.v', 'l_26': 'encoder.block.20.layer.0.SelfAttention.o', 'l_27': 'encoder.block.20.layer.0.dropout', 'l_28': 'encoder.block.20.layer.1.layer_norm', 'l_29': 'encoder.block.20.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.20.layer.1.DenseReluDense.dropout', 'l_31': 
'encoder.block.20.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.20.layer.1.dropout'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = t_0[slice(1, None, None)] t_0 = (t_7 + t_0) t_7 = t_0[slice(None, 2, None)] t_6 = t_7[0] t_1 = 
self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
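# Hedged construction sketch: the partitioning framework calls each class with
# a dict mapping every LAYER_SCOPES string to the live submodule of the traced
# T5ForConditionalGeneration. The bracketed segments of a scope string spell
# out the attribute path (T5Stack[encoder]/ModuleList[block]/T5Block[18]/...
# -> encoder.block.18...), which is also what self.lookup records, so the dict
# can be rebuilt from a loaded model. How the real framework does this is an
# assumption; only the scope/path correspondence is taken from this file.
def _sketch_build_partition6():
    import re
    from transformers import T5ForConditionalGeneration  # assumes HF transformers

    model = T5ForConditionalGeneration.from_pretrained('t5-3b')

    def scope_to_path(scope):
        return '.'.join(re.findall(r'\[([^\]]+)\]', scope))

    layers = {s: model.get_submodule(scope_to_path(s))
              for s in Partition6.LAYER_SCOPES}
    return Partition6(layers, tensors={}, device='cpu')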
class Partition7(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:7'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1] self.lookup = {'l_0': 'encoder.block.21.layer.0.layer_norm', 'l_1': 'encoder.block.21.layer.0.SelfAttention.q', 'l_2': 'encoder.block.21.layer.0.SelfAttention.k', 'l_3': 'encoder.block.21.layer.0.SelfAttention.v', 'l_4': 'encoder.block.21.layer.0.SelfAttention.o', 'l_5': 'encoder.block.21.layer.0.dropout', 'l_6': 'encoder.block.21.layer.1.layer_norm', 'l_7': 'encoder.block.21.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.21.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.21.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.21.layer.1.dropout', 'l_11': 'encoder.block.22.layer.0.layer_norm', 'l_12': 'encoder.block.22.layer.0.SelfAttention.q', 'l_13': 'encoder.block.22.layer.0.SelfAttention.k', 'l_14': 'encoder.block.22.layer.0.SelfAttention.v', 'l_15': 'encoder.block.22.layer.0.SelfAttention.o', 'l_16': 'encoder.block.22.layer.0.dropout', 'l_17': 'encoder.block.22.layer.1.layer_norm', 'l_18': 'encoder.block.22.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.22.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.22.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.22.layer.1.dropout', 'l_22': 'encoder.block.23.layer.0.layer_norm', 'l_23': 'encoder.block.23.layer.0.SelfAttention.q', 'l_24': 'encoder.block.23.layer.0.SelfAttention.k', 'l_25': 'encoder.block.23.layer.0.SelfAttention.v', 'l_26': 'encoder.block.23.layer.0.SelfAttention.o', 'l_27': 'encoder.block.23.layer.0.dropout', 'l_28': 'encoder.block.23.layer.1.layer_norm', 'l_29': 'encoder.block.23.layer.1.DenseReluDense.wi', 'l_30': 
'encoder.block.23.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.23.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.23.layer.1.dropout', 'l_33': 'encoder.final_layer_norm'} self.to(self.device) def forward(self, *args): (x0, x1) = unflatten(args, self.input_structure) t_0 = self.l_0(x0) t_1 = self.l_1(t_0) t_2 = self.l_2(t_0) t_3 = self.l_3(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_1 = t_1.view(t_0, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_0, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_0, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += x1 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_0 = t_3.view(t_0, (- 1), 4096) t_0 = self.l_4(t_0) t_3 = self.l_5(t_0) t_3 = (x0 + t_3) t_0 = (t_0, None, x1) t_2 = t_0[0] t_3 = (t_3,) t_0 = t_0[slice(1, None, None)] t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_1 = t_3[0] t_4 = self.l_6(t_1) t_3 = t_3[1] t_0 = t_0[slice(2, None, None)] t_4 = self.l_7(t_4) t_4 = torch.nn.functional.relu(t_4, inplace=False) t_4 = self.l_8(t_4) t_4 = self.l_9(t_4) t_4 = self.l_10(t_4) t_4 = (t_1 + t_4) t_3 = (t_4, t_3) t_0 = (t_3 + t_0) t_3 = t_0[slice(None, 2, None)] t_3 = t_3[0] t_4 = self.l_11(t_3) t_0 = t_0[2] t_1 = self.l_12(t_4) t_5 = self.l_13(t_4) t_6 = self.l_14(t_4) t_4 = t_4.shape t_4 = t_4[slice(None, 2, None)] t_4 = t_4[0] t_1 = t_1.view(t_4, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_4, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_6 = t_6.view(t_4, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_5 = t_5.transpose(3, 2) t_5 = torch.matmul(t_1, t_5) t_5 += t_0 t_1 = t_5.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_5 = t_1.type_as(t_5) t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_5, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_4 = t_6.view(t_4, (- 1), 4096) t_4 = self.l_15(t_4) t_6 = self.l_16(t_4) t_6 = (t_3 + t_6) t_0 = (t_4, None, t_0) t_4 = t_0[0] t_6 = (t_6,) t_0 = t_0[slice(1, None, None)] t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_3 = t_6[0] t_5 = self.l_17(t_3) t_6 = t_6[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_18(t_5) t_5 = torch.nn.functional.relu(t_5, inplace=False) t_5 = self.l_19(t_5) t_5 = self.l_20(t_5) t_5 = self.l_21(t_5) t_5 = (t_3 + t_5) t_6 = (t_5, t_6) t_0 = (t_6 + t_0) t_6 = t_0[slice(None, 2, None)] t_6 = t_6[0] t_5 = self.l_22(t_6) t_0 = t_0[2] t_3 = self.l_23(t_5) t_1 = self.l_24(t_5) t_7 = self.l_25(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_3 = t_3.view(t_5, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_1 = t_1.view(t_5, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_3, t_1) t_1 += t_0 t_3 = t_1.float() t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_3.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_1, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_5 = t_7.view(t_5, (- 1), 4096) t_5 = self.l_26(t_5) t_7 = self.l_27(t_5) t_7 = (t_6 + t_7) t_0 = (t_5, None, t_0) t_5 = t_0[0] t_7 = (t_7,) t_0 = 
t_0[slice(1, None, None)]
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_7 = self.l_33(t_7)
        t_0 = t_0[2]
        return (t_7,)

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
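# Because Partition7 holds encoder.final_layer_norm (l_33), its single return
# value is the finished encoder hidden state. The lookup table also means its
# checkpoint keys come out under the original model's names; a sketch,
# assuming `stage` is a constructed Partition7:
def _sketch_checkpoint_roundtrip(stage):
    sd = stage.state_dict()
    # internal module l_26 is published under its original scope:
    assert 'encoder.block.23.layer.0.SelfAttention.o.weight' in sd
    # and the same original-name keys are accepted back on load:
    stage.load_state_dict(sd)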
class Partition8(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Embedding[relative_attention_bias]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:8'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'encoder.dropout',
            'l_1': 'decoder.block.0.layer.0.layer_norm',
            'l_2': 'decoder.block.0.layer.0.SelfAttention.q',
            'l_3': 'decoder.block.0.layer.0.SelfAttention.k',
            'l_4': 'decoder.block.0.layer.0.SelfAttention.v',
            'l_5': 'decoder.block.0.layer.0.SelfAttention.relative_attention_bias',
            'l_6': 'decoder.block.0.layer.0.SelfAttention.o',
            'l_7': 'decoder.block.0.layer.0.dropout',
            'l_8': 'decoder.block.0.layer.1.layer_norm',
            'l_9': 'decoder.block.0.layer.1.EncDecAttention.q',
            'l_10': 'decoder.block.0.layer.1.EncDecAttention.k',
            'l_11': 'decoder.block.0.layer.1.EncDecAttention.v',
            'l_12': 'decoder.block.0.layer.1.EncDecAttention.o',
            'l_13': 'decoder.block.0.layer.1.dropout',
            'l_14': 'decoder.block.0.layer.2.layer_norm',
            'l_15': 'decoder.block.0.layer.2.DenseReluDense.wi',
            'l_16': 'decoder.block.0.layer.2.DenseReluDense.dropout',
            'l_17': 'decoder.block.0.layer.2.DenseReluDense.wo',
            'l_18': 'decoder.block.0.layer.2.dropout',
            'l_19': 'decoder.block.1.layer.0.layer_norm',
            'l_20': 'decoder.block.1.layer.0.SelfAttention.q',
            'l_21': 'decoder.block.1.layer.0.SelfAttention.k',
            'l_22': 'decoder.block.1.layer.0.SelfAttention.v',
            'l_23': 'decoder.block.1.layer.0.SelfAttention.o',
            'l_24': 'decoder.block.1.layer.0.dropout',
            'l_25': 'decoder.block.1.layer.1.layer_norm',
            'l_26': 'decoder.block.1.layer.1.EncDecAttention.q',
            'l_27': 'decoder.block.1.layer.1.EncDecAttention.k',
            'l_28': 'decoder.block.1.layer.1.EncDecAttention.v',
            'l_29': 'decoder.block.1.layer.1.EncDecAttention.o',
            'l_30': 'decoder.block.1.layer.1.dropout',
            'l_31': 'decoder.block.1.layer.2.layer_norm',
            'l_32': 'decoder.block.1.layer.2.DenseReluDense.wi',
            'l_33': 'decoder.block.1.layer.2.DenseReluDense.dropout',
            'l_34': 'decoder.block.1.layer.2.DenseReluDense.wo',
            'l_35': 'decoder.block.1.layer.2.dropout',
            'l_36': 'decoder.block.2.layer.0.layer_norm',
            'l_37': 'decoder.block.2.layer.0.SelfAttention.q',
            'l_38': 'decoder.block.2.layer.0.SelfAttention.k',
            'l_39': 'decoder.block.2.layer.0.SelfAttention.v',
            'l_40': 'decoder.block.2.layer.0.SelfAttention.o',
            'l_41': 'decoder.block.2.layer.0.dropout',
            'l_42': 'decoder.block.2.layer.1.layer_norm',
            'l_43': 'decoder.block.2.layer.1.EncDecAttention.q',
            'l_44': 'decoder.block.2.layer.1.EncDecAttention.k',
            'l_45': 'decoder.block.2.layer.1.EncDecAttention.v',
            'l_46': 'decoder.block.2.layer.1.EncDecAttention.o',
            'l_47': 'decoder.block.2.layer.1.dropout',
            'l_48': 'decoder.block.2.layer.2.layer_norm',
            'l_49': 'decoder.block.2.layer.2.DenseReluDense.wi',
            'l_50': 'decoder.block.2.layer.2.DenseReluDense.dropout',
            'l_51': 'decoder.block.2.layer.2.DenseReluDense.wo',
            'l_52': 'decoder.block.2.layer.2.dropout',
            'l_53': 'decoder.block.21.layer.1.EncDecAttention.k',
        }
        self.to(self.device)

    def forward(self, *args):
        (attention_mask, x0, x1, x2) = unflatten(args, self.input_structure)
        # encoder output (after dropout) and pre-computed cross-attention
        # keys/values for decoder blocks 0-2, plus block 21's key (l_53)
        t_0 = self.l_0(x0)
        t_1 = self.l_10(t_0)
        t_2 = self.l_11(t_0)
        t_3 = self.l_27(t_0)
        t_4 = self.l_28(t_0)
        t_5 = self.l_44(t_0)
        t_6 = self.l_45(t_0)
        t_7 = self.l_53(t_0)
        # extended (additive) attention mask over the encoder output
        t_8 = attention_mask[:, None, None, :]
        t_8 = t_8.to(dtype=torch.float32)
        t_8 = (1.0 - t_8)
        t_8 = (t_8 * -1000000000.0)
        # decoder block 0: self-attention
        t_9 = self.l_1(x1)
        t_10 = self.l_2(t_9)
        t_11 = self.l_3(t_9)
        t_12 = self.l_4(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_13 = t_9[0]
        t_9 = t_9[1]
        t_10 = t_10.view(t_13, -1, 32, 128)
        t_10 = t_10.transpose(1, 2)
        t_11 = t_11.view(t_13, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_13, -1, 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_10, t_11)
        # T5 relative position bucketing for the self-attention bias (l_5)
        t_10 = torch.arange(t_9, dtype=torch.int64, device=self.device)
        t_10 = t_10[:, None]
        t_9 = torch.arange(t_9, dtype=torch.int64, device=self.device)
        t_9 = t_9[None, :]
        t_10 = (t_9 - t_10)
        t_9 = torch.zeros_like(t_10, device=self.device)
        t_9 = torch.min(t_10, t_9)
        t_9 = -t_9
        t_10 = t_9.float()
        t_14 = (t_9 < 16)
        t_10 = (t_10 / 16)
        t_10 = torch.log(t_10)
        t_15 = math.log(8.0)
        t_15 = (t_10 / t_15)
        t_15 = (t_15 * 16)
        t_15 = t_15.to(torch.int64)
        t_15 = (16 + t_15)
        t_10 = torch.full_like(t_15, 31, device=self.device)
        t_10 = torch.min(t_15, t_10)
        t_10 = torch.where(t_14, t_9, t_10)
        t_10 = (0 + t_10)
        t_10 = t_10.to(self.device)
        t_10 = self.l_5(t_10)
        t_10 = t_10.permute([2, 0, 1])
        t_10 = t_10.unsqueeze(0)
        t_10 = (t_10 + x2)
        t_11 += t_10
        t_9 = t_11.float()
        t_9 = torch.nn.functional.softmax(t_9, dim=-1, _stacklevel=3, dtype=None)
        t_11 = t_9.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_13 = t_12.view(t_13, -1, 4096)
        t_13 = self.l_6(t_13)
        t_12 = self.l_7(t_13)
        t_12 = (x1 + t_12)
        t_10 = (t_13, None, t_10)
        t_13 = t_10[0]
        t_12 = (t_12,)
        t_10 = t_10[1:]
        t_10 = (t_12 + t_10)
        # decoder block 0: cross-attention
        t_12 = t_10[:2]
        t_11 = t_12[0]
        t_9 = self.l_8(t_11)
        t_12 = t_12[1]
        t_10 = t_10[2:]
        t_14 = self.l_9(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_15 = t_9[0]
        t_9 = t_9[1]
        t_16 = t_0.shape
        t_16 = t_16[1]
        t_14 = t_14.view(t_15, -1, 32, 128)
        t_14 = t_14.transpose(1, 2)
        t_1 = t_1.view(t_15, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_15, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_14, t_1)
        t_14 = t_1.dtype
        t_16 = (1, 32, t_9, t_16)
        t_14 = torch.zeros(t_16, device=self.device, dtype=t_14)
        t_8 = (t_14 + t_8)
        t_1 += t_8
        t_14 = t_1.float()
        t_14 = torch.nn.functional.softmax(t_14, dim=-1, _stacklevel=3, dtype=None)
        t_1 = t_14.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
        t_2 = torch.matmul(t_1, t_2)
        t_2 = t_2.transpose(1, 2)
        t_2 = t_2.contiguous()
        t_15 = t_2.view(t_15, -1, 4096)
        t_15 = self.l_12(t_15)
        t_2 = self.l_13(t_15)
        t_2 = (t_11 + t_2)
        t_8 = (t_15, None, t_8)
        t_15 = t_8[0]
        t_2 = (t_2,)
        t_8 = t_8[1:]
        t_8 = (t_2 + t_8)
        # decoder block 0: feed-forward
        t_2 = t_8[0]
        t_11 = self.l_14(t_2)
        t_8 = t_8[2:]
        t_8 = (t_10 + t_8)
        t_11 = self.l_15(t_11)
        t_11 = torch.nn.functional.relu(t_11, inplace=False)
        t_11 = self.l_16(t_11)
        t_11 = self.l_17(t_11)
        t_11 = self.l_18(t_11)
        t_11 = (t_2 + t_11)
        t_12 = (t_11, t_12)
        t_8 = (t_12 + t_8)
        # decoder block 1: self-attention
        t_12 = t_8[:2]
        t_12 = t_12[0]
        t_11 = self.l_19(t_12)
        t_2 = t_8[2]
        t_8 = t_8[3]
        t_10 = self.l_20(t_11)
        t_1 = self.l_21(t_11)
        t_14 = self.l_22(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_10 = t_10.view(t_11, -1, 32, 128)
        t_10 = t_10.transpose(1, 2)
        t_1 = t_1.view(t_11, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_14 = t_14.view(t_11, -1, 32, 128)
        t_14 = t_14.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_10, t_1)
        t_1 += t_2
        t_10 = t_1.float()
        t_10 = torch.nn.functional.softmax(t_10, dim=-1, _stacklevel=3, dtype=None)
        t_1 = t_10.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
        t_14 = torch.matmul(t_1, t_14)
        t_14 = t_14.transpose(1, 2)
        t_14 = t_14.contiguous()
        t_11 = t_14.view(t_11, -1, 4096)
        t_11 = self.l_23(t_11)
        t_14 = self.l_24(t_11)
        t_14 = (t_12 + t_14)
        t_2 = (t_11, None, t_2)
        t_11 = t_2[0]
        t_14 = (t_14,)
        t_2 = t_2[1:]
        t_2 = (t_14 + t_2)
        # decoder block 1: cross-attention
        t_14 = t_2[:2]
        t_12 = t_14[0]
        t_1 = self.l_25(t_12)
        t_14 = t_14[1]
        t_2 = t_2[2:]
        t_10 = self.l_26(t_1)
        t_1 = t_1.shape
        t_1 = t_1[:2]
        t_1 = t_1[0]
        t_10 = t_10.view(t_1, -1, 32, 128)
        t_10 = t_10.transpose(1, 2)
        t_3 = t_3.view(t_1, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_4 = t_4.view(t_1, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_3 = t_3.transpose(3, 2)
        t_3 = torch.matmul(t_10, t_3)
        t_3 += t_8
        t_10 = t_3.float()
        t_10 = torch.nn.functional.softmax(t_10, dim=-1, _stacklevel=3, dtype=None)
        t_3 = t_10.type_as(t_3)
        t_3 = torch.nn.functional.dropout(t_3, p=0.1, training=self.training, inplace=False)
        t_4 = torch.matmul(t_3, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_1 = t_4.view(t_1, -1, 4096)
        t_1 = self.l_29(t_1)
        t_4 = self.l_30(t_1)
        t_4 = (t_12 + t_4)
        t_8 = (t_1, None, t_8)
        t_1 = t_8[0]
        t_4 = (t_4,)
        t_8 = t_8[1:]
        t_8 = (t_4 + t_8)
        # decoder block 1: feed-forward
        t_4 = t_8[0]
        t_12 = self.l_31(t_4)
        t_8 = t_8[2:]
        t_8 = (t_2 + t_8)
        t_12 = self.l_32(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_33(t_12)
        t_12 = self.l_34(t_12)
        t_12 = self.l_35(t_12)
        t_12 = (t_4 + t_12)
        t_14 = (t_12, t_14)
        t_8 = (t_14 + t_8)
        # decoder block 2: self-attention
        t_14 = t_8[:2]
        t_14 = t_14[0]
        t_12 = self.l_36(t_14)
        t_4 = t_8[2]
        t_8 = t_8[3]
        t_2 = self.l_37(t_12)
        t_3 = self.l_38(t_12)
        t_10 = self.l_39(t_12)
        t_12 = t_12.shape
        t_12 = t_12[:2]
        t_12 = t_12[0]
        t_2 = t_2.view(t_12, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_12, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_10 = t_10.view(t_12, -1, 32, 128)
        t_10 = t_10.transpose(1, 2)
        t_3 = t_3.transpose(3, 2)
        t_3 = torch.matmul(t_2, t_3)
        t_3 += t_4
        t_2 = t_3.float()
        t_2 = torch.nn.functional.softmax(t_2, dim=-1, _stacklevel=3, dtype=None)
        t_3 = t_2.type_as(t_3)
        t_3 = torch.nn.functional.dropout(t_3, p=0.1, training=self.training, inplace=False)
        t_10 = torch.matmul(t_3, t_10)
        t_10 = t_10.transpose(1, 2)
        t_10 = t_10.contiguous()
        t_12 = t_10.view(t_12, -1, 4096)
        t_12 = self.l_40(t_12)
        t_10 = self.l_41(t_12)
        t_10 = (t_14 + t_10)
        t_4 = (t_12, None, t_4)
        t_12 = t_4[0]
        t_10 = (t_10,)
        t_4 = t_4[1:]
        t_4 = (t_10 + t_4)
        # decoder block 2: cross-attention
        t_10 = t_4[:2]
        t_14 = t_10[0]
        t_3 = self.l_42(t_14)
        t_10 = t_10[1]
        t_4 = t_4[2:]
        t_2 = self.l_43(t_3)
        t_3 = t_3.shape
        t_3 = t_3[:2]
        t_3 = t_3[0]
        t_2 = t_2.view(t_3, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_5 = t_5.view(t_3, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_3, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_2, t_5)
        t_5 += t_8
        t_2 = t_5.float()
        t_2 = torch.nn.functional.softmax(t_2, dim=-1, _stacklevel=3, dtype=None)
        t_5 = t_2.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_3 = t_6.view(t_3, -1, 4096)
        t_3 = self.l_46(t_3)
        t_6 = self.l_47(t_3)
        t_6 = (t_14 + t_6)
        t_8 = (t_3, None, t_8)
        t_3 = t_8[0]
        t_6 = (t_6,)
        t_8 = t_8[1:]
        t_8 = (t_6 + t_8)
        # decoder block 2: feed-forward
        t_6 = t_8[0]
        t_14 = self.l_48(t_6)
        t_8 = t_8[2:]
        t_8 = (t_4 + t_8)
        t_14 = self.l_49(t_14)
        t_14 = torch.nn.functional.relu(t_14, inplace=False)
        t_14 = self.l_50(t_14)
        t_14 = self.l_51(t_14)
        t_14 = self.l_52(t_14)
        t_14 = (t_6 + t_14)
        t_10 = (t_14, t_10)
        t_8 = (t_10 + t_8)
        t_10 = t_8[:2]
        t_10 = t_10[0]
        t_14 = t_8[2]
        t_8 = t_8[3]
        return list(flatten((t_0, t_7, t_10, t_14, t_8)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
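# ---------------------------------------------------------------------------
# Illustrative sketch (not generated code): a partition is constructed from
# the `layers`/`tensors` dicts produced by the tracing step, and its forward
# consumes and returns the flat tensor lists paired up by `input_structure`.
# The names `layers`, `tensors` and the seed tensors below are assumptions.
#
#     part8 = Partition8(layers, tensors, device='cpu')
#     t_0, t_7, t_10, t_14, t_8 = part8(attention_mask, x0, x1, x2)
# ---------------------------------------------------------------------------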
class Partition9(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:9'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.3.layer.0.layer_norm',
            'l_1': 'decoder.block.3.layer.0.SelfAttention.q',
            'l_2': 'decoder.block.3.layer.0.SelfAttention.k',
            'l_3': 'decoder.block.3.layer.0.SelfAttention.v',
            'l_4': 'decoder.block.3.layer.0.SelfAttention.o',
            'l_5': 'decoder.block.3.layer.0.dropout',
            'l_6': 'decoder.block.3.layer.1.layer_norm',
            'l_7': 'decoder.block.3.layer.1.EncDecAttention.q',
            'l_8': 'decoder.block.3.layer.1.EncDecAttention.k',
            'l_9': 'decoder.block.3.layer.1.EncDecAttention.v',
            'l_10': 'decoder.block.3.layer.1.EncDecAttention.o',
            'l_11': 'decoder.block.3.layer.1.dropout',
            'l_12': 'decoder.block.3.layer.2.layer_norm',
            'l_13': 'decoder.block.3.layer.2.DenseReluDense.wi',
            'l_14': 'decoder.block.3.layer.2.DenseReluDense.dropout',
            'l_15': 'decoder.block.3.layer.2.DenseReluDense.wo',
            'l_16': 'decoder.block.3.layer.2.dropout',
            'l_17': 'decoder.block.4.layer.0.layer_norm',
            'l_18': 'decoder.block.4.layer.0.SelfAttention.q',
            'l_19': 'decoder.block.4.layer.0.SelfAttention.k',
            'l_20': 'decoder.block.4.layer.0.SelfAttention.v',
            'l_21': 'decoder.block.4.layer.0.SelfAttention.o',
            'l_22': 'decoder.block.4.layer.0.dropout',
            'l_23': 'decoder.block.4.layer.1.layer_norm',
            'l_24': 'decoder.block.4.layer.1.EncDecAttention.q',
            'l_25': 'decoder.block.4.layer.1.EncDecAttention.k',
            'l_26': 'decoder.block.4.layer.1.EncDecAttention.v',
            'l_27': 'decoder.block.4.layer.1.EncDecAttention.o',
            'l_28': 'decoder.block.4.layer.1.dropout',
            'l_29': 'decoder.block.4.layer.2.layer_norm',
            'l_30': 'decoder.block.4.layer.2.DenseReluDense.wi',
            'l_31': 'decoder.block.4.layer.2.DenseReluDense.dropout',
            'l_32': 'decoder.block.4.layer.2.DenseReluDense.wo',
            'l_33': 'decoder.block.4.layer.2.dropout',
            'l_34': 'decoder.block.5.layer.0.layer_norm',
            'l_35': 'decoder.block.5.layer.0.SelfAttention.q',
            'l_36': 'decoder.block.5.layer.0.SelfAttention.k',
            'l_37': 'decoder.block.5.layer.0.SelfAttention.v',
            'l_38': 'decoder.block.5.layer.0.SelfAttention.o',
            'l_39': 'decoder.block.5.layer.0.dropout',
            'l_40': 'decoder.block.5.layer.1.layer_norm',
            'l_41': 'decoder.block.5.layer.1.EncDecAttention.q',
            'l_42': 'decoder.block.5.layer.1.EncDecAttention.k',
            'l_43': 'decoder.block.5.layer.1.EncDecAttention.v',
            'l_44': 'decoder.block.5.layer.1.EncDecAttention.o',
            'l_45': 'decoder.block.5.layer.1.dropout',
            'l_46': 'decoder.block.5.layer.2.layer_norm',
            'l_47': 'decoder.block.5.layer.2.DenseReluDense.wi',
            'l_48': 'decoder.block.5.layer.2.DenseReluDense.dropout',
            'l_49': 'decoder.block.5.layer.2.DenseReluDense.wo',
            'l_50': 'decoder.block.5.layer.2.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[:2]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=-1, _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, -1, 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[1:]
        t_6 = (t_9 + t_6)
        t_9 = t_6[:2]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[2:]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[:2]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=-1, _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, -1, 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[1:]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[2:]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[:2]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, -1, 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, -1, 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[1:]
        t_1 = (t_12 + t_1)
        t_12 = t_1[:2]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[2:]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, -1, 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[1:]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[2:]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[:2]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, -1, 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[1:]
        t_3 = (t_6 + t_3)
        t_6 = t_3[:2]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[2:]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[1:]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[2:]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[:2]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
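# Note: `lookup` maps the generated attribute names (l_0, l_1, ...) back to the
# original T5 parameter names, so the state_dict helpers above expose each
# partition's checkpoint under the standard Hugging Face keys. A hedged
# example (the key is read off the lookup table above):
#
#     part9 = Partition9(layers, tensors, device='cpu')
#     'decoder.block.3.layer.0.SelfAttention.q.weight' in part9.state_dict()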
class Partition10(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:10'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.6.layer.0.layer_norm',
            'l_1': 'decoder.block.6.layer.0.SelfAttention.q',
            'l_2': 'decoder.block.6.layer.0.SelfAttention.k',
            'l_3': 'decoder.block.6.layer.0.SelfAttention.v',
            'l_4': 'decoder.block.6.layer.0.SelfAttention.o',
            'l_5': 'decoder.block.6.layer.0.dropout',
            'l_6': 'decoder.block.6.layer.1.layer_norm',
            'l_7': 'decoder.block.6.layer.1.EncDecAttention.q',
            'l_8': 'decoder.block.6.layer.1.EncDecAttention.k',
            'l_9': 'decoder.block.6.layer.1.EncDecAttention.v',
            'l_10': 'decoder.block.6.layer.1.EncDecAttention.o',
            'l_11': 'decoder.block.6.layer.1.dropout',
            'l_12': 'decoder.block.6.layer.2.layer_norm',
            'l_13': 'decoder.block.6.layer.2.DenseReluDense.wi',
            'l_14': 'decoder.block.6.layer.2.DenseReluDense.dropout',
            'l_15': 'decoder.block.6.layer.2.DenseReluDense.wo',
            'l_16': 'decoder.block.6.layer.2.dropout',
            'l_17': 'decoder.block.7.layer.0.layer_norm',
            'l_18': 'decoder.block.7.layer.0.SelfAttention.q',
            'l_19': 'decoder.block.7.layer.0.SelfAttention.k',
            'l_20': 'decoder.block.7.layer.0.SelfAttention.v',
            'l_21': 'decoder.block.7.layer.0.SelfAttention.o',
            'l_22': 'decoder.block.7.layer.0.dropout',
            'l_23': 'decoder.block.7.layer.1.layer_norm',
            'l_24': 'decoder.block.7.layer.1.EncDecAttention.q',
            'l_25': 'decoder.block.7.layer.1.EncDecAttention.k',
            'l_26': 'decoder.block.7.layer.1.EncDecAttention.v',
            'l_27': 'decoder.block.7.layer.1.EncDecAttention.o',
            'l_28': 'decoder.block.7.layer.1.dropout',
            'l_29': 'decoder.block.7.layer.2.layer_norm',
            'l_30': 'decoder.block.7.layer.2.DenseReluDense.wi',
            'l_31': 'decoder.block.7.layer.2.DenseReluDense.dropout',
            'l_32': 'decoder.block.7.layer.2.DenseReluDense.wo',
            'l_33': 'decoder.block.7.layer.2.dropout',
            'l_34': 'decoder.block.8.layer.0.layer_norm',
            'l_35': 'decoder.block.8.layer.0.SelfAttention.q',
            'l_36': 'decoder.block.8.layer.0.SelfAttention.k',
            'l_37': 'decoder.block.8.layer.0.SelfAttention.v',
            'l_38': 'decoder.block.8.layer.0.SelfAttention.o',
            'l_39': 'decoder.block.8.layer.0.dropout',
            'l_40': 'decoder.block.8.layer.1.layer_norm',
            'l_41': 'decoder.block.8.layer.1.EncDecAttention.q',
            'l_42': 'decoder.block.8.layer.1.EncDecAttention.k',
            'l_43': 'decoder.block.8.layer.1.EncDecAttention.v',
            'l_44': 'decoder.block.8.layer.1.EncDecAttention.o',
            'l_45': 'decoder.block.8.layer.1.dropout',
            'l_46': 'decoder.block.8.layer.2.layer_norm',
            'l_47': 'decoder.block.8.layer.2.DenseReluDense.wi',
            'l_48': 'decoder.block.8.layer.2.DenseReluDense.dropout',
            'l_49': 'decoder.block.8.layer.2.DenseReluDense.wo',
            'l_50': 'decoder.block.8.layer.2.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[:2]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=-1, _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, -1, 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[1:]
        t_6 = (t_9 + t_6)
        t_9 = t_6[:2]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[2:]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[:2]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=-1, _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, -1, 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[1:]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[2:]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[:2]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, -1, 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, -1, 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[1:]
        t_1 = (t_12 + t_1)
        t_12 = t_1[:2]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[2:]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, -1, 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[1:]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[2:]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[:2]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, -1, 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[1:]
        t_3 = (t_6 + t_3)
        t_6 = t_3[:2]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[2:]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[1:]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[2:]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[:2]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
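# Note: the overridden cpu()/cuda()/to() keep `self.device` in sync with the
# module's actual placement, which matters for partitions whose forward
# creates tensors on self.device (e.g. Partition8's torch.arange/zeros).
# Sketch, target device assumed:
#
#     part10 = Partition10(layers, tensors, device='cuda:10')
#     part10.cpu()    # weights move and part10.device == torch.device('cpu')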
class Partition11(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:11'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.9.layer.0.layer_norm',
            'l_1': 'decoder.block.9.layer.0.SelfAttention.q',
            'l_2': 'decoder.block.9.layer.0.SelfAttention.k',
            'l_3': 'decoder.block.9.layer.0.SelfAttention.v',
            'l_4': 'decoder.block.9.layer.0.SelfAttention.o',
            'l_5': 'decoder.block.9.layer.0.dropout',
            'l_6': 'decoder.block.9.layer.1.layer_norm',
            'l_7': 'decoder.block.9.layer.1.EncDecAttention.q',
            'l_8': 'decoder.block.9.layer.1.EncDecAttention.k',
            'l_9': 'decoder.block.9.layer.1.EncDecAttention.v',
            'l_10': 'decoder.block.9.layer.1.EncDecAttention.o',
            'l_11': 'decoder.block.9.layer.1.dropout',
            'l_12': 'decoder.block.9.layer.2.layer_norm',
            'l_13': 'decoder.block.9.layer.2.DenseReluDense.wi',
            'l_14': 'decoder.block.9.layer.2.DenseReluDense.dropout',
            'l_15': 'decoder.block.9.layer.2.DenseReluDense.wo',
            'l_16': 'decoder.block.9.layer.2.dropout',
            'l_17': 'decoder.block.10.layer.0.layer_norm',
            'l_18': 'decoder.block.10.layer.0.SelfAttention.q',
            'l_19': 'decoder.block.10.layer.0.SelfAttention.k',
            'l_20': 'decoder.block.10.layer.0.SelfAttention.v',
            'l_21': 'decoder.block.10.layer.0.SelfAttention.o',
            'l_22': 'decoder.block.10.layer.0.dropout',
            'l_23': 'decoder.block.10.layer.1.layer_norm',
            'l_24': 'decoder.block.10.layer.1.EncDecAttention.q',
            'l_25': 'decoder.block.10.layer.1.EncDecAttention.k',
            'l_26': 'decoder.block.10.layer.1.EncDecAttention.v',
            'l_27': 'decoder.block.10.layer.1.EncDecAttention.o',
            'l_28': 'decoder.block.10.layer.1.dropout',
            'l_29': 'decoder.block.10.layer.2.layer_norm',
            'l_30': 'decoder.block.10.layer.2.DenseReluDense.wi',
            'l_31': 'decoder.block.10.layer.2.DenseReluDense.dropout',
            'l_32': 'decoder.block.10.layer.2.DenseReluDense.wo',
            'l_33': 'decoder.block.10.layer.2.dropout',
            'l_34': 'decoder.block.11.layer.0.layer_norm',
            'l_35': 'decoder.block.11.layer.0.SelfAttention.q',
            'l_36': 'decoder.block.11.layer.0.SelfAttention.k',
            'l_37': 'decoder.block.11.layer.0.SelfAttention.v',
            'l_38': 'decoder.block.11.layer.0.SelfAttention.o',
            'l_39': 'decoder.block.11.layer.0.dropout',
            'l_40': 'decoder.block.11.layer.1.layer_norm',
            'l_41': 'decoder.block.11.layer.1.EncDecAttention.q',
            'l_42': 'decoder.block.11.layer.1.EncDecAttention.k',
            'l_43': 'decoder.block.11.layer.1.EncDecAttention.v',
            'l_44': 'decoder.block.11.layer.1.EncDecAttention.o',
            'l_45': 'decoder.block.11.layer.1.dropout',
            'l_46': 'decoder.block.11.layer.2.layer_norm',
            'l_47': 'decoder.block.11.layer.2.DenseReluDense.wi',
            'l_48': 'decoder.block.11.layer.2.DenseReluDense.dropout',
            'l_49': 'decoder.block.11.layer.2.DenseReluDense.wo',
            'l_50': 'decoder.block.11.layer.2.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[:2]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=-1, _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, -1, 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[1:]
        t_6 = (t_9 + t_6)
        t_9 = t_6[:2]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[2:]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[:2]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=-1, _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, -1, 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[1:]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[2:]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[:2]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, -1, 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, -1, 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[1:]
        t_1 = (t_12 + t_1)
        t_12 = t_1[:2]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[2:]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, -1, 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[1:]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[2:]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[:2]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, -1, 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[1:]
        t_3 = (t_6 + t_3)
        t_6 = t_3[:2]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[2:]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1, _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[1:]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[2:]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[:2]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
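# Added commentary (not part of the tracing output): every PartitionN in this
# file follows the same generated template. LAYER_SCOPES lists the traced T5
# submodules the stage owns, __init__ pulls them out of the shared `layers`
# dict and pins them to a single device, and `lookup` maps the generated
# attribute names (l_0, l_1, ...) back to the original T5ForConditionalGeneration
# parameter paths so checkpoints saved from the unpartitioned model load
# unchanged. For these decoder stages the four flattened inputs appear to be,
# in order: the encoder output (x0, consumed only by the EncDecAttention k/v
# projections), the running decoder hidden states (x1), the self-attention
# position bias/mask (x2, added to the self-attention scores), and the
# cross-attention bias/mask (x3, added to the cross-attention scores); each
# stage returns (x0, new hidden states, self bias, cross bias) so the next
# stage can repeat the pattern.
#
# For reference, every attention in forward() is the standard T5 pattern
# (T5 applies no 1/sqrt(d_head) scaling) with 32 heads of size 128; a sketch
# with illustrative names (bsz, q, k, v, o, position_bias are not in this file):
#
#     q = q.view(bsz, -1, 32, 128).transpose(1, 2)      # (bsz, heads, len, d_head)
#     scores = torch.matmul(q, k.transpose(3, 2)) + position_bias
#     probs = F.softmax(scores.float(), dim=-1).type_as(scores)
#     ctx = torch.matmul(F.dropout(probs, 0.1), v).transpose(1, 2).contiguous()
#     out = o(ctx.view(bsz, -1, 4096))                   # 32 * 128 == 4096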
class Partition12(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:12'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.12.layer.0.layer_norm', 'l_1': 'decoder.block.12.layer.0.SelfAttention.q', 'l_2': 'decoder.block.12.layer.0.SelfAttention.k', 'l_3': 'decoder.block.12.layer.0.SelfAttention.v', 'l_4': 'decoder.block.12.layer.0.SelfAttention.o', 'l_5': 'decoder.block.12.layer.0.dropout', 'l_6': 'decoder.block.12.layer.1.layer_norm', 'l_7': 'decoder.block.12.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.12.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.12.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.12.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.12.layer.1.dropout', 'l_12': 'decoder.block.12.layer.2.layer_norm', 'l_13': 'decoder.block.12.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.12.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.12.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.12.layer.2.dropout', 'l_17': 'decoder.block.13.layer.0.layer_norm', 'l_18': 'decoder.block.13.layer.0.SelfAttention.q', 'l_19': 'decoder.block.13.layer.0.SelfAttention.k', 'l_20': 'decoder.block.13.layer.0.SelfAttention.v', 'l_21': 'decoder.block.13.layer.0.SelfAttention.o', 'l_22': 'decoder.block.13.layer.0.dropout', 'l_23': 'decoder.block.13.layer.1.layer_norm', 'l_24': 'decoder.block.13.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.13.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.13.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.13.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.13.layer.1.dropout', 'l_29': 'decoder.block.13.layer.2.layer_norm', 'l_30': 'decoder.block.13.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.13.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.13.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.13.layer.2.dropout', 'l_34': 'decoder.block.14.layer.0.layer_norm', 'l_35': 'decoder.block.14.layer.0.SelfAttention.q', 'l_36': 'decoder.block.14.layer.0.SelfAttention.k', 'l_37': 'decoder.block.14.layer.0.SelfAttention.v', 'l_38': 'decoder.block.14.layer.0.SelfAttention.o', 'l_39': 'decoder.block.14.layer.0.dropout', 'l_40': 'decoder.block.14.layer.1.layer_norm', 'l_41': 'decoder.block.14.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.14.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.14.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.14.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.14.layer.1.dropout', 'l_46': 
'decoder.block.14.layer.2.layer_norm', 'l_47': 'decoder.block.14.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.14.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.14.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.14.layer.2.dropout'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_9(x0) t_2 = self.l_25(x0) t_3 = self.l_26(x0) t_4 = self.l_42(x0) t_5 = self.l_43(x0) t_6 = self.l_0(x1) t_7 = self.l_1(t_6) t_8 = self.l_2(t_6) t_9 = self.l_3(t_6) t_6 = t_6.shape t_6 = t_6[slice(None, 2, None)] t_6 = t_6[0] t_7 = t_7.view(t_6, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_6, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_9 = t_9.view(t_6, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = torch.matmul(t_7, t_8) t_8 += x2 t_7 = t_8.float() t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_7.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_9 = torch.matmul(t_8, t_9) t_9 = t_9.transpose(1, 2) t_9 = t_9.contiguous() t_6 = t_9.view(t_6, (- 1), 4096) t_6 = self.l_4(t_6) t_9 = self.l_5(t_6) t_9 = (x1 + t_9) t_6 = (t_6, None, x2) t_8 = t_6[0] t_9 = (t_9,) t_6 = t_6[slice(1, None, None)] t_6 = (t_9 + t_6) t_9 = t_6[slice(None, 2, None)] t_7 = t_9[0] t_10 = self.l_6(t_7) t_9 = t_9[1] t_6 = t_6[slice(2, None, None)] t_11 = self.l_7(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x3 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_10(t_10) t_1 = self.l_11(t_10) t_1 = (t_7 + t_1) t_10 = (t_10, None, x3) t_7 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_12(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_6 + t_10) t_0 = self.l_13(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_9 = (t_0, t_9) t_10 = (t_9 + t_10) t_9 = t_10[slice(None, 2, None)] t_9 = t_9[0] t_0 = self.l_17(t_9) t_1 = t_10[2] t_10 = t_10[3] t_6 = self.l_18(t_0) t_11 = self.l_19(t_0) t_12 = self.l_20(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_6 = t_6.view(t_0, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_6, t_11) t_11 += t_1 t_6 = t_11.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_6.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_11, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_0 = t_12.view(t_0, (- 1), 4096) t_0 = self.l_21(t_0) t_12 = self.l_22(t_0) t_12 = (t_9 + t_12) t_1 = (t_0, None, t_1) t_0 = t_1[0] t_12 = (t_12,) t_1 = t_1[slice(1, None, None)] t_1 = (t_12 + t_1) t_12 = 
t_1[slice(None, 2, None)] t_9 = t_12[0] t_11 = self.l_23(t_9) t_12 = t_12[1] t_1 = t_1[slice(2, None, None)] t_6 = self.l_24(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_6 = t_6.view(t_11, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.view(t_11, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_11, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_6, t_2) t_2 += t_10 t_6 = t_2.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_6.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_11 = t_3.view(t_11, (- 1), 4096) t_11 = self.l_27(t_11) t_3 = self.l_28(t_11) t_3 = (t_9 + t_3) t_10 = (t_11, None, t_10) t_11 = t_10[0] t_3 = (t_3,) t_10 = t_10[slice(1, None, None)] t_10 = (t_3 + t_10) t_3 = t_10[0] t_9 = self.l_29(t_3) t_10 = t_10[slice(2, None, None)] t_10 = (t_1 + t_10) t_9 = self.l_30(t_9) t_9 = torch.nn.functional.relu(t_9, inplace=False) t_9 = self.l_31(t_9) t_9 = self.l_32(t_9) t_9 = self.l_33(t_9) t_9 = (t_3 + t_9) t_12 = (t_9, t_12) t_10 = (t_12 + t_10) t_12 = t_10[slice(None, 2, None)] t_12 = t_12[0] t_9 = self.l_34(t_12) t_3 = t_10[2] t_10 = t_10[3] t_1 = self.l_35(t_9) t_2 = self.l_36(t_9) t_6 = self.l_37(t_9) t_9 = t_9.shape t_9 = t_9[slice(None, 2, None)] t_9 = t_9[0] t_1 = t_1.view(t_9, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_9, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_6 = t_6.view(t_9, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += t_3 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_2, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_9 = t_6.view(t_9, (- 1), 4096) t_9 = self.l_38(t_9) t_6 = self.l_39(t_9) t_6 = (t_12 + t_6) t_3 = (t_9, None, t_3) t_9 = t_3[0] t_6 = (t_6,) t_3 = t_3[slice(1, None, None)] t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_12 = t_6[0] t_2 = self.l_40(t_12) t_6 = t_6[1] t_3 = t_3[slice(2, None, None)] t_1 = self.l_41(t_2) t_2 = t_2.shape t_2 = t_2[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_1.view(t_2, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_4 = t_4.view(t_2, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_2, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_4 = t_4.transpose(3, 2) t_4 = torch.matmul(t_1, t_4) t_4 += t_10 t_1 = t_4.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_4 = t_1.type_as(t_4) t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_2 = t_5.view(t_2, (- 1), 4096) t_2 = self.l_44(t_2) t_5 = self.l_45(t_2) t_5 = (t_12 + t_5) t_10 = (t_2, None, t_10) t_2 = t_10[0] t_5 = (t_5,) t_10 = t_10[slice(1, None, None)] t_10 = (t_5 + t_10) t_5 = t_10[0] t_12 = self.l_46(t_5) t_10 = t_10[slice(2, None, None)] t_10 = (t_3 + t_10) t_12 = self.l_47(t_12) t_12 = torch.nn.functional.relu(t_12, inplace=False) t_12 = self.l_48(t_12) t_12 = self.l_49(t_12) t_12 = self.l_50(t_12) t_12 = (t_5 + t_12) t_6 = (t_12, t_6) t_10 = (t_6 + t_10) t_6 = t_10[slice(None, 2, None)] t_6 = t_6[0] t_12 = t_10[2] t_10 = t_10[3] return list(flatten((x0, t_6, t_12, t_10))) def state_dict(self, *args, **kwargs): 
return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
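# Partition12 is structurally identical to Partition11: same four-tensor
# input/output contract and the same l_0..l_50 layout, only covering decoder
# blocks 12-14 and pinned to cuda:12 instead of cuda:11. A minimal sketch of
# the state-dict round trip these wrappers enable (names below are
# illustrative; `layers` and `tensors` come from a tracing stage not shown
# in this file):
#
#     p12 = Partition12(layers, tensors, device='cuda:12')
#     sd = p12.state_dict()
#     # keys use the original model's names, e.g.
#     # 'decoder.block.12.layer.0.SelfAttention.q.weight', never 'l_1.weight'
#     p12.load_state_dict(sd)  # appears to require only this stage's keys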
class Partition13(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:13'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.15.layer.0.layer_norm', 'l_1': 'decoder.block.15.layer.0.SelfAttention.q', 'l_2': 'decoder.block.15.layer.0.SelfAttention.k', 'l_3': 'decoder.block.15.layer.0.SelfAttention.v', 'l_4': 'decoder.block.15.layer.0.SelfAttention.o', 'l_5': 'decoder.block.15.layer.0.dropout', 'l_6': 'decoder.block.15.layer.1.layer_norm', 'l_7': 'decoder.block.15.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.15.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.15.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.15.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.15.layer.1.dropout', 'l_12': 'decoder.block.15.layer.2.layer_norm', 'l_13': 'decoder.block.15.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.15.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.15.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.15.layer.2.dropout', 'l_17': 'decoder.block.16.layer.0.layer_norm', 'l_18': 'decoder.block.16.layer.0.SelfAttention.q', 'l_19': 'decoder.block.16.layer.0.SelfAttention.k', 'l_20': 'decoder.block.16.layer.0.SelfAttention.v', 'l_21': 'decoder.block.16.layer.0.SelfAttention.o', 'l_22': 'decoder.block.16.layer.0.dropout', 'l_23': 'decoder.block.16.layer.1.layer_norm', 'l_24': 'decoder.block.16.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.16.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.16.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.16.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.16.layer.1.dropout', 'l_29': 'decoder.block.16.layer.2.layer_norm', 'l_30': 'decoder.block.16.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.16.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.16.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.16.layer.2.dropout', 'l_34': 'decoder.block.17.layer.0.layer_norm', 'l_35': 'decoder.block.17.layer.0.SelfAttention.q', 'l_36': 'decoder.block.17.layer.0.SelfAttention.k', 'l_37': 'decoder.block.17.layer.0.SelfAttention.v', 'l_38': 'decoder.block.17.layer.0.SelfAttention.o', 'l_39': 'decoder.block.17.layer.0.dropout', 'l_40': 
'decoder.block.17.layer.1.layer_norm', 'l_41': 'decoder.block.17.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.17.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.17.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.17.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.17.layer.1.dropout', 'l_46': 'decoder.block.17.layer.2.layer_norm', 'l_47': 'decoder.block.17.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.17.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.17.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.17.layer.2.dropout', 'l_51': 'decoder.block.18.layer.0.layer_norm', 'l_52': 'decoder.block.18.layer.0.SelfAttention.q'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_9(x0) t_2 = self.l_25(x0) t_3 = self.l_26(x0) t_4 = self.l_42(x0) t_5 = self.l_43(x0) t_6 = self.l_0(x1) t_7 = self.l_1(t_6) t_8 = self.l_2(t_6) t_9 = self.l_3(t_6) t_6 = t_6.shape t_6 = t_6[slice(None, 2, None)] t_6 = t_6[0] t_7 = t_7.view(t_6, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_6, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_9 = t_9.view(t_6, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = torch.matmul(t_7, t_8) t_8 += x2 t_7 = t_8.float() t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_7.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_9 = torch.matmul(t_8, t_9) t_9 = t_9.transpose(1, 2) t_9 = t_9.contiguous() t_6 = t_9.view(t_6, (- 1), 4096) t_6 = self.l_4(t_6) t_9 = self.l_5(t_6) t_9 = (x1 + t_9) t_6 = (t_6, None, x2) t_8 = t_6[0] t_9 = (t_9,) t_6 = t_6[slice(1, None, None)] t_6 = (t_9 + t_6) t_9 = t_6[slice(None, 2, None)] t_7 = t_9[0] t_10 = self.l_6(t_7) t_9 = t_9[1] t_6 = t_6[slice(2, None, None)] t_11 = self.l_7(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x3 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_10(t_10) t_1 = self.l_11(t_10) t_1 = (t_7 + t_1) t_10 = (t_10, None, x3) t_7 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_12(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_6 + t_10) t_0 = self.l_13(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_9 = (t_0, t_9) t_10 = (t_9 + t_10) t_9 = t_10[slice(None, 2, None)] t_9 = t_9[0] t_0 = self.l_17(t_9) t_1 = t_10[2] t_10 = t_10[3] t_6 = self.l_18(t_0) t_11 = self.l_19(t_0) t_12 = self.l_20(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_6 = t_6.view(t_0, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_6, t_11) t_11 += t_1 t_6 = t_11.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_11 = 
t_6.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_11, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_0 = t_12.view(t_0, (- 1), 4096) t_0 = self.l_21(t_0) t_12 = self.l_22(t_0) t_12 = (t_9 + t_12) t_1 = (t_0, None, t_1) t_0 = t_1[0] t_12 = (t_12,) t_1 = t_1[slice(1, None, None)] t_1 = (t_12 + t_1) t_12 = t_1[slice(None, 2, None)] t_9 = t_12[0] t_11 = self.l_23(t_9) t_12 = t_12[1] t_1 = t_1[slice(2, None, None)] t_6 = self.l_24(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_6 = t_6.view(t_11, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.view(t_11, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_11, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_6, t_2) t_2 += t_10 t_6 = t_2.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_6.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_11 = t_3.view(t_11, (- 1), 4096) t_11 = self.l_27(t_11) t_3 = self.l_28(t_11) t_3 = (t_9 + t_3) t_10 = (t_11, None, t_10) t_11 = t_10[0] t_3 = (t_3,) t_10 = t_10[slice(1, None, None)] t_10 = (t_3 + t_10) t_3 = t_10[0] t_9 = self.l_29(t_3) t_10 = t_10[slice(2, None, None)] t_10 = (t_1 + t_10) t_9 = self.l_30(t_9) t_9 = torch.nn.functional.relu(t_9, inplace=False) t_9 = self.l_31(t_9) t_9 = self.l_32(t_9) t_9 = self.l_33(t_9) t_9 = (t_3 + t_9) t_12 = (t_9, t_12) t_10 = (t_12 + t_10) t_12 = t_10[slice(None, 2, None)] t_12 = t_12[0] t_9 = self.l_34(t_12) t_3 = t_10[2] t_10 = t_10[3] t_1 = self.l_35(t_9) t_2 = self.l_36(t_9) t_6 = self.l_37(t_9) t_9 = t_9.shape t_9 = t_9[slice(None, 2, None)] t_9 = t_9[0] t_1 = t_1.view(t_9, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_9, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_6 = t_6.view(t_9, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += t_3 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_2, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_9 = t_6.view(t_9, (- 1), 4096) t_9 = self.l_38(t_9) t_6 = self.l_39(t_9) t_6 = (t_12 + t_6) t_3 = (t_9, None, t_3) t_9 = t_3[0] t_6 = (t_6,) t_3 = t_3[slice(1, None, None)] t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_12 = t_6[0] t_2 = self.l_40(t_12) t_6 = t_6[1] t_3 = t_3[slice(2, None, None)] t_1 = self.l_41(t_2) t_2 = t_2.shape t_2 = t_2[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_1.view(t_2, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_4 = t_4.view(t_2, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_2, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_4 = t_4.transpose(3, 2) t_4 = torch.matmul(t_1, t_4) t_4 += t_10 t_1 = t_4.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_4 = t_1.type_as(t_4) t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_2 = t_5.view(t_2, (- 1), 4096) t_2 = self.l_44(t_2) t_5 = self.l_45(t_2) t_5 = (t_12 + t_5) t_10 = (t_2, None, t_10) t_2 = t_10[0] t_5 = (t_5,) t_10 = t_10[slice(1, None, None)] t_10 = (t_5 + t_10) t_5 = t_10[0] t_12 = self.l_46(t_5) t_10 = t_10[slice(2, 
None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = self.l_51(t_6)
        t_5 = t_10[2]
        t_10 = t_10[3]
        t_3 = self.l_52(t_12)
        return list(flatten((x0, t_6, t_12, t_5, t_10, t_3)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
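# Unlike the preceding stages, Partition13 does not end on a block boundary:
# decoder block 18 is split across GPUs 13 and 14. This stage owns block 18's
# first layer norm (l_51) and SelfAttention query projection (l_52), so it
# emits six tensors instead of four: the pass-through encoder output x0, the
# block-17 hidden states t_6, the normed hidden states t_12, the self- and
# cross-attention biases t_5/t_10, and the projected queries t_3. Partition14's
# input_structure of [1, 1, 1, 1, 1, 1] consumes exactly this tuple and
# finishes block 18's self-attention with its own k/v/o projections.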
class Partition14(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:14'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.18.layer.0.SelfAttention.k', 'l_1': 'decoder.block.18.layer.0.SelfAttention.v', 'l_2': 'decoder.block.18.layer.0.SelfAttention.o', 'l_3': 'decoder.block.18.layer.0.dropout', 'l_4': 'decoder.block.18.layer.1.layer_norm', 'l_5': 'decoder.block.18.layer.1.EncDecAttention.q', 'l_6': 'decoder.block.18.layer.1.EncDecAttention.k', 'l_7': 'decoder.block.18.layer.1.EncDecAttention.v', 'l_8': 'decoder.block.18.layer.1.EncDecAttention.o', 'l_9': 'decoder.block.18.layer.1.dropout', 'l_10': 'decoder.block.18.layer.2.layer_norm', 'l_11': 'decoder.block.18.layer.2.DenseReluDense.wi', 'l_12': 'decoder.block.18.layer.2.DenseReluDense.dropout', 'l_13': 'decoder.block.18.layer.2.DenseReluDense.wo', 'l_14': 'decoder.block.18.layer.2.dropout', 'l_15': 'decoder.block.19.layer.0.layer_norm', 'l_16': 'decoder.block.19.layer.0.SelfAttention.q', 'l_17': 'decoder.block.19.layer.0.SelfAttention.k', 'l_18': 'decoder.block.19.layer.0.SelfAttention.v', 'l_19': 'decoder.block.19.layer.0.SelfAttention.o', 'l_20': 'decoder.block.19.layer.0.dropout', 'l_21': 'decoder.block.19.layer.1.layer_norm', 'l_22': 'decoder.block.19.layer.1.EncDecAttention.q', 'l_23': 'decoder.block.19.layer.1.EncDecAttention.k', 'l_24': 'decoder.block.19.layer.1.EncDecAttention.v', 'l_25': 'decoder.block.19.layer.1.EncDecAttention.o', 'l_26': 'decoder.block.19.layer.1.dropout', 'l_27': 'decoder.block.19.layer.2.layer_norm', 'l_28': 'decoder.block.19.layer.2.DenseReluDense.wi', 'l_29': 'decoder.block.19.layer.2.DenseReluDense.dropout', 'l_30': 'decoder.block.19.layer.2.DenseReluDense.wo', 'l_31': 'decoder.block.19.layer.2.dropout', 'l_32': 'decoder.block.20.layer.0.layer_norm', 'l_33': 'decoder.block.20.layer.0.SelfAttention.q', 'l_34': 'decoder.block.20.layer.0.SelfAttention.k', 'l_35': 'decoder.block.20.layer.0.SelfAttention.v', 'l_36': 'decoder.block.20.layer.0.SelfAttention.o', 'l_37': 'decoder.block.20.layer.0.dropout', 'l_38': 'decoder.block.20.layer.1.layer_norm', 'l_39': 'decoder.block.20.layer.1.EncDecAttention.q', 'l_40': 'decoder.block.20.layer.1.EncDecAttention.k', 'l_41': 'decoder.block.20.layer.1.EncDecAttention.v', 'l_42': 'decoder.block.20.layer.1.EncDecAttention.o', 'l_43': 'decoder.block.20.layer.1.dropout', 'l_44': 'decoder.block.20.layer.2.layer_norm', 'l_45': 'decoder.block.20.layer.2.DenseReluDense.wi', 'l_46': 'decoder.block.20.layer.2.DenseReluDense.dropout', 'l_47': 'decoder.block.20.layer.2.DenseReluDense.wo', 'l_48': 'decoder.block.20.layer.2.dropout'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure) t_0 = self.l_6(x0) t_1 = 
self.l_7(x0) t_2 = self.l_23(x0) t_3 = self.l_24(x0) t_4 = self.l_40(x0) t_5 = self.l_41(x0) t_6 = self.l_0(x2) t_7 = self.l_1(x2) t_8 = x2.shape t_8 = t_8[slice(None, 2, None)] t_8 = t_8[0] t_9 = x5.view(t_8, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_6 = t_6.view(t_8, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_7 = t_7.view(t_8, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_6 = t_6.transpose(3, 2) t_6 = torch.matmul(t_9, t_6) t_6 += x3 t_9 = t_6.float() t_9 = torch.nn.functional.softmax(t_9, dim=(- 1), _stacklevel=3, dtype=None) t_6 = t_9.type_as(t_6) t_6 = torch.nn.functional.dropout(t_6, p=0.1, training=self.training, inplace=False) t_7 = torch.matmul(t_6, t_7) t_7 = t_7.transpose(1, 2) t_7 = t_7.contiguous() t_8 = t_7.view(t_8, (- 1), 4096) t_8 = self.l_2(t_8) t_7 = self.l_3(t_8) t_7 = (x1 + t_7) t_8 = (t_8, None, x3) t_6 = t_8[0] t_7 = (t_7,) t_8 = t_8[slice(1, None, None)] t_8 = (t_7 + t_8) t_7 = t_8[slice(None, 2, None)] t_9 = t_7[0] t_10 = self.l_4(t_9) t_7 = t_7[1] t_8 = t_8[slice(2, None, None)] t_11 = self.l_5(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x4 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_8(t_10) t_1 = self.l_9(t_10) t_1 = (t_9 + t_1) t_10 = (t_10, None, x4) t_9 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_10(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_8 + t_10) t_0 = self.l_11(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_12(t_0) t_0 = self.l_13(t_0) t_0 = self.l_14(t_0) t_0 = (t_1 + t_0) t_7 = (t_0, t_7) t_10 = (t_7 + t_10) t_7 = t_10[slice(None, 2, None)] t_7 = t_7[0] t_0 = self.l_15(t_7) t_1 = t_10[2] t_10 = t_10[3] t_8 = self.l_16(t_0) t_11 = self.l_17(t_0) t_12 = self.l_18(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_8 = t_8.view(t_0, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_8, t_11) t_11 += t_1 t_8 = t_11.float() t_8 = torch.nn.functional.softmax(t_8, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_8.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_11, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_0 = t_12.view(t_0, (- 1), 4096) t_0 = self.l_19(t_0) t_12 = self.l_20(t_0) t_12 = (t_7 + t_12) t_1 = (t_0, None, t_1) t_0 = t_1[0] t_12 = (t_12,) t_1 = t_1[slice(1, None, None)] t_1 = (t_12 + t_1) t_12 = t_1[slice(None, 2, None)] t_7 = t_12[0] t_11 = self.l_21(t_7) t_12 = t_12[1] t_1 = t_1[slice(2, None, None)] t_8 = self.l_22(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_8 = t_8.view(t_11, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_2 = t_2.view(t_11, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_11, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = 
torch.matmul(t_8, t_2) t_2 += t_10 t_8 = t_2.float() t_8 = torch.nn.functional.softmax(t_8, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_8.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_11 = t_3.view(t_11, (- 1), 4096) t_11 = self.l_25(t_11) t_3 = self.l_26(t_11) t_3 = (t_7 + t_3) t_10 = (t_11, None, t_10) t_11 = t_10[0] t_3 = (t_3,) t_10 = t_10[slice(1, None, None)] t_10 = (t_3 + t_10) t_3 = t_10[0] t_7 = self.l_27(t_3) t_10 = t_10[slice(2, None, None)] t_10 = (t_1 + t_10) t_7 = self.l_28(t_7) t_7 = torch.nn.functional.relu(t_7, inplace=False) t_7 = self.l_29(t_7) t_7 = self.l_30(t_7) t_7 = self.l_31(t_7) t_7 = (t_3 + t_7) t_12 = (t_7, t_12) t_10 = (t_12 + t_10) t_12 = t_10[slice(None, 2, None)] t_12 = t_12[0] t_7 = self.l_32(t_12) t_3 = t_10[2] t_10 = t_10[3] t_1 = self.l_33(t_7) t_2 = self.l_34(t_7) t_8 = self.l_35(t_7) t_7 = t_7.shape t_7 = t_7[slice(None, 2, None)] t_7 = t_7[0] t_1 = t_1.view(t_7, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_7, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_8 = t_8.view(t_7, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += t_3 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_8 = torch.matmul(t_2, t_8) t_8 = t_8.transpose(1, 2) t_8 = t_8.contiguous() t_7 = t_8.view(t_7, (- 1), 4096) t_7 = self.l_36(t_7) t_8 = self.l_37(t_7) t_8 = (t_12 + t_8) t_3 = (t_7, None, t_3) t_7 = t_3[0] t_8 = (t_8,) t_3 = t_3[slice(1, None, None)] t_3 = (t_8 + t_3) t_8 = t_3[slice(None, 2, None)] t_12 = t_8[0] t_2 = self.l_38(t_12) t_8 = t_8[1] t_3 = t_3[slice(2, None, None)] t_1 = self.l_39(t_2) t_2 = t_2.shape t_2 = t_2[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_1.view(t_2, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_4 = t_4.view(t_2, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_2, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_4 = t_4.transpose(3, 2) t_4 = torch.matmul(t_1, t_4) t_4 += t_10 t_1 = t_4.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_4 = t_1.type_as(t_4) t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_2 = t_5.view(t_2, (- 1), 4096) t_2 = self.l_42(t_2) t_5 = self.l_43(t_2) t_5 = (t_12 + t_5) t_10 = (t_2, None, t_10) t_2 = t_10[0] t_5 = (t_5,) t_10 = t_10[slice(1, None, None)] t_10 = (t_5 + t_10) t_5 = t_10[0] t_12 = self.l_44(t_5) t_10 = t_10[slice(2, None, None)] t_10 = (t_3 + t_10) t_12 = self.l_45(t_12) t_12 = torch.nn.functional.relu(t_12, inplace=False) t_12 = self.l_46(t_12) t_12 = self.l_47(t_12) t_12 = self.l_48(t_12) t_12 = (t_5 + t_12) t_8 = (t_12, t_8) t_10 = (t_8 + t_10) t_8 = t_10[slice(None, 2, None)] t_8 = t_8[0] t_12 = t_10[2] t_10 = t_10[3] return list(flatten((x0, t_8, t_12, t_10))) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, 
*args, **kwargs): return to(self, *args, **kwargs)
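# Usage sketch (illustrative only, not emitted by the partitioner): every
# Partition* stage is built from the scope -> module and scope -> tensor
# dictionaries produced by layerDict/tensorDict (defined further below).
# Inputs arrive as a flat tensor list, are rebuilt with unflatten() against
# input_structure, and outputs are re-flattened so stages chain over plain
# lists.
#
#   model = ...  # assumption: a traced T5ForConditionalGeneration ('t5-3b')
#   layers, tensors = layerDict(model, depth=10000), tensorDict(model)
#   stage14 = Partition14(layers, tensors, device='cuda:14')
#   flat_outputs = stage14(*flat_inputs)  # 6 inputs, per input_structure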
class Partition15(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:15'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.21.layer.0.layer_norm', 'l_1': 'decoder.block.21.layer.0.SelfAttention.q', 'l_2': 'decoder.block.21.layer.0.SelfAttention.k', 'l_3': 'decoder.block.21.layer.0.SelfAttention.v', 'l_4': 'decoder.block.21.layer.0.SelfAttention.o', 'l_5': 'decoder.block.21.layer.0.dropout', 'l_6': 'decoder.block.21.layer.1.layer_norm', 'l_7': 'decoder.block.21.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.21.layer.1.EncDecAttention.v', 'l_9': 'decoder.block.21.layer.1.EncDecAttention.o', 'l_10': 'decoder.block.21.layer.1.dropout', 'l_11': 'decoder.block.21.layer.2.layer_norm', 'l_12': 'decoder.block.21.layer.2.DenseReluDense.wi', 'l_13': 'decoder.block.21.layer.2.DenseReluDense.dropout', 'l_14': 'decoder.block.21.layer.2.DenseReluDense.wo', 'l_15': 'decoder.block.21.layer.2.dropout', 'l_16': 'decoder.block.22.layer.0.layer_norm', 'l_17': 'decoder.block.22.layer.0.SelfAttention.q', 'l_18': 'decoder.block.22.layer.0.SelfAttention.k', 'l_19': 'decoder.block.22.layer.0.SelfAttention.v', 'l_20': 'decoder.block.22.layer.0.SelfAttention.o', 'l_21': 'decoder.block.22.layer.0.dropout', 'l_22': 'decoder.block.22.layer.1.layer_norm', 'l_23': 'decoder.block.22.layer.1.EncDecAttention.q', 'l_24': 'decoder.block.22.layer.1.EncDecAttention.k', 'l_25': 'decoder.block.22.layer.1.EncDecAttention.v', 'l_26': 'decoder.block.22.layer.1.EncDecAttention.o', 'l_27': 'decoder.block.22.layer.1.dropout', 'l_28': 'decoder.block.22.layer.2.layer_norm', 'l_29': 'decoder.block.22.layer.2.DenseReluDense.wi', 'l_30': 'decoder.block.22.layer.2.DenseReluDense.dropout', 'l_31': 'decoder.block.22.layer.2.DenseReluDense.wo', 'l_32': 'decoder.block.22.layer.2.dropout', 'l_33': 'decoder.block.23.layer.0.layer_norm', 'l_34': 'decoder.block.23.layer.0.SelfAttention.q', 'l_35': 'decoder.block.23.layer.0.SelfAttention.k', 'l_36': 'decoder.block.23.layer.0.SelfAttention.v', 'l_37': 'decoder.block.23.layer.0.SelfAttention.o', 'l_38': 'decoder.block.23.layer.0.dropout', 'l_39': 'decoder.block.23.layer.1.layer_norm', 'l_40': 'decoder.block.23.layer.1.EncDecAttention.q', 'l_41': 'decoder.block.23.layer.1.EncDecAttention.k', 'l_42': 'decoder.block.23.layer.1.EncDecAttention.v', 'l_43': 'decoder.block.23.layer.1.EncDecAttention.o', 'l_44': 'decoder.block.23.layer.1.dropout', 'l_45': 
'decoder.block.23.layer.2.layer_norm', 'l_46': 'decoder.block.23.layer.2.DenseReluDense.wi', 'l_47': 'decoder.block.23.layer.2.DenseReluDense.dropout', 'l_48': 'decoder.block.23.layer.2.DenseReluDense.wo', 'l_49': 'decoder.block.23.layer.2.dropout', 'l_50': 'decoder.final_layer_norm', 'l_51': 'decoder.dropout', 'l_52': 'lm_head'} self.to(self.device) def forward(self, *args): (labels, x0, x1, x2, x3, x4) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_24(x0) t_2 = self.l_25(x0) t_3 = self.l_41(x0) t_4 = self.l_42(x0) t_5 = self.l_0(x2) t_6 = self.l_1(t_5) t_7 = self.l_2(t_5) t_8 = self.l_3(t_5) t_5 = t_5.shape t_5 = t_5[slice(None, 2, None)] t_5 = t_5[0] t_6 = t_6.view(t_5, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_7 = t_7.view(t_5, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_5, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_7 = t_7.transpose(3, 2) t_7 = torch.matmul(t_6, t_7) t_7 += x3 t_6 = t_7.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_7 = t_6.type_as(t_7) t_7 = torch.nn.functional.dropout(t_7, p=0.1, training=self.training, inplace=False) t_8 = torch.matmul(t_7, t_8) t_8 = t_8.transpose(1, 2) t_8 = t_8.contiguous() t_5 = t_8.view(t_5, (- 1), 4096) t_5 = self.l_4(t_5) t_8 = self.l_5(t_5) t_8 = (x2 + t_8) t_5 = (t_5, None, x3) t_7 = t_5[0] t_8 = (t_8,) t_5 = t_5[slice(1, None, None)] t_5 = (t_8 + t_5) t_8 = t_5[slice(None, 2, None)] t_6 = t_8[0] t_9 = self.l_6(t_6) t_8 = t_8[1] t_5 = t_5[slice(2, None, None)] t_10 = self.l_7(t_9) t_9 = t_9.shape t_9 = t_9[slice(None, 2, None)] t_9 = t_9[0] t_10 = t_10.view(t_9, (- 1), 32, 128) t_10 = t_10.transpose(1, 2) t_11 = x1.view(t_9, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_9, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_10, t_11) t_11 += x4 t_10 = t_11.float() t_10 = torch.nn.functional.softmax(t_10, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_10.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_0 = torch.matmul(t_11, t_0) t_0 = t_0.transpose(1, 2) t_0 = t_0.contiguous() t_9 = t_0.view(t_9, (- 1), 4096) t_9 = self.l_9(t_9) t_0 = self.l_10(t_9) t_0 = (t_6 + t_0) t_9 = (t_9, None, x4) t_6 = t_9[0] t_0 = (t_0,) t_9 = t_9[slice(1, None, None)] t_9 = (t_0 + t_9) t_0 = t_9[0] t_11 = self.l_11(t_0) t_9 = t_9[slice(2, None, None)] t_9 = (t_5 + t_9) t_11 = self.l_12(t_11) t_11 = torch.nn.functional.relu(t_11, inplace=False) t_11 = self.l_13(t_11) t_11 = self.l_14(t_11) t_11 = self.l_15(t_11) t_11 = (t_0 + t_11) t_8 = (t_11, t_8) t_9 = (t_8 + t_9) t_8 = t_9[slice(None, 2, None)] t_8 = t_8[0] t_11 = self.l_16(t_8) t_0 = t_9[2] t_9 = t_9[3] t_5 = self.l_17(t_11) t_10 = self.l_18(t_11) t_12 = self.l_19(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_5 = t_5.view(t_11, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_10 = t_10.view(t_11, (- 1), 32, 128) t_10 = t_10.transpose(1, 2) t_12 = t_12.view(t_11, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_10 = t_10.transpose(3, 2) t_10 = torch.matmul(t_5, t_10) t_10 += t_0 t_5 = t_10.float() t_5 = torch.nn.functional.softmax(t_5, dim=(- 1), _stacklevel=3, dtype=None) t_10 = t_5.type_as(t_10) t_10 = torch.nn.functional.dropout(t_10, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_10, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_11 = t_12.view(t_11, (- 1), 4096) t_11 = self.l_20(t_11) t_12 = self.l_21(t_11) t_12 = (t_8 + t_12) t_0 = (t_11, None, t_0) t_11 = 
t_0[0] t_12 = (t_12,) t_0 = t_0[slice(1, None, None)] t_0 = (t_12 + t_0) t_12 = t_0[slice(None, 2, None)] t_8 = t_12[0] t_10 = self.l_22(t_8) t_12 = t_12[1] t_0 = t_0[slice(2, None, None)] t_5 = self.l_23(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_5 = t_5.view(t_10, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_10, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_5, t_1) t_1 += t_9 t_5 = t_1.float() t_5 = torch.nn.functional.softmax(t_5, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_5.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_2 = torch.matmul(t_1, t_2) t_2 = t_2.transpose(1, 2) t_2 = t_2.contiguous() t_10 = t_2.view(t_10, (- 1), 4096) t_10 = self.l_26(t_10) t_2 = self.l_27(t_10) t_2 = (t_8 + t_2) t_9 = (t_10, None, t_9) t_10 = t_9[0] t_2 = (t_2,) t_9 = t_9[slice(1, None, None)] t_9 = (t_2 + t_9) t_2 = t_9[0] t_8 = self.l_28(t_2) t_9 = t_9[slice(2, None, None)] t_9 = (t_0 + t_9) t_8 = self.l_29(t_8) t_8 = torch.nn.functional.relu(t_8, inplace=False) t_8 = self.l_30(t_8) t_8 = self.l_31(t_8) t_8 = self.l_32(t_8) t_8 = (t_2 + t_8) t_12 = (t_8, t_12) t_9 = (t_12 + t_9) t_12 = t_9[slice(None, 2, None)] t_12 = t_12[0] t_8 = self.l_33(t_12) t_2 = t_9[2] t_9 = t_9[3] t_0 = self.l_34(t_8) t_1 = self.l_35(t_8) t_5 = self.l_36(t_8) t_8 = t_8.shape t_8 = t_8[slice(None, 2, None)] t_8 = t_8[0] t_0 = t_0.view(t_8, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_8, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_5 = t_5.view(t_8, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_1 = t_1.transpose(3, 2) t_1 = torch.matmul(t_0, t_1) t_1 += t_2 t_0 = t_1.float() t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None) t_1 = t_0.type_as(t_1) t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_1, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_8 = t_5.view(t_8, (- 1), 4096) t_8 = self.l_37(t_8) t_5 = self.l_38(t_8) t_5 = (t_12 + t_5) t_2 = (t_8, None, t_2) t_8 = t_2[0] t_5 = (t_5,) t_2 = t_2[slice(1, None, None)] t_2 = (t_5 + t_2) t_5 = t_2[slice(None, 2, None)] t_12 = t_5[0] t_1 = self.l_39(t_12) t_5 = t_5[1] t_2 = t_2[slice(2, None, None)] t_0 = self.l_40(t_1) t_1 = t_1.shape t_1 = t_1[slice(None, 2, None)] t_1 = t_1[0] t_0 = t_0.view(t_1, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_3 = t_3.view(t_1, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_4 = t_4.view(t_1, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_3 = t_3.transpose(3, 2) t_3 = torch.matmul(t_0, t_3) t_3 += t_9 t_0 = t_3.float() t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None) t_3 = t_0.type_as(t_3) t_3 = torch.nn.functional.dropout(t_3, p=0.1, training=self.training, inplace=False) t_4 = torch.matmul(t_3, t_4) t_4 = t_4.transpose(1, 2) t_4 = t_4.contiguous() t_1 = t_4.view(t_1, (- 1), 4096) t_1 = self.l_43(t_1) t_4 = self.l_44(t_1) t_4 = (t_12 + t_4) t_9 = (t_1, None, t_9) t_1 = t_9[0] t_4 = (t_4,) t_9 = t_9[slice(1, None, None)] t_9 = (t_4 + t_9) t_4 = t_9[0] t_12 = self.l_45(t_4) t_9 = t_9[slice(2, None, None)] t_9 = (t_2 + t_9) t_12 = self.l_46(t_12) t_12 = torch.nn.functional.relu(t_12, inplace=False) t_12 = self.l_47(t_12) t_12 = self.l_48(t_12) t_12 = self.l_49(t_12) t_12 = (t_4 + t_12) t_5 = (t_12, t_5) t_9 = (t_5 + t_9) t_5 = t_9[slice(None, 2, None)] t_5 = t_5[0] t_5 = self.l_50(t_5) t_12 = t_9[2] t_9 = t_9[3] t_5 = self.l_51(t_5) 
t_5 = (t_5 * 0.03125) t_5 = self.l_52(t_5) t_4 = t_5.size((- 1)) t_4 = t_5.view((- 1), t_4) t_5 = labels.view((- 1)) t_5 = torch.nn.functional.cross_entropy(t_4, t_5, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean') return (t_5,) def state_dict(self, *args, **kwargs): return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
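# Partition15 is the last stage (stage_depth 0): after decoder block 23 it
# applies the final layer norm and dropout (l_50, l_51), rescales by
# 0.03125 == 1024 ** -0.5 (the tied-embedding scaling for d_model = 1024),
# projects with lm_head (l_52) and returns the token-level cross entropy.
# Schematically:
#
#   hidden = dropout(final_layer_norm(hidden)) * 1024 ** -0.5
#   logits = lm_head(hidden)
#   loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1),
#                          ignore_index=-100)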
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str] = None, basic_blocks: Tuple[Type[nn.Module], ...] = (), full: bool = False) -> Iterator[Tuple[nn.Module, str, nn.Module, Optional[bool]]]:
    '''
    Iterate over model layers, yielding (layer, layer_scope, encasing_module),
    plus a terminal flag when ``full`` is set.

    Parameters:
    -----------
    module:
        the module to iterate over
    depth:
        how far down the module tree to go
    basic_blocks:
        modules of these types will not be broken down further
    full:
        whether to yield only the layers selected by the depth and
        basic_blocks options, or every layer encountered
    '''
    if prefix is None:
        prefix = type(module).__name__
    for name, sub_module in module.named_children():
        scope = prefix + '/' + type(sub_module).__name__ + f'[{name}]'
        if len(list(sub_module.children())) == 0 or isinstance(sub_module, tuple(basic_blocks)) or depth == 0:
            if full:
                yield sub_module, scope, module, True
            else:
                yield sub_module, scope, module
        else:
            if full:
                yield sub_module, scope, module, False
            yield from traverse_model(sub_module, depth - 1, scope, basic_blocks, full)
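# Example (sketch): printing the scopes that the LAYER_SCOPES strings in the
# Partition* classes refer to. basic_blocks here mirrors the tuple used by
# create_pipeline_configuration below.
#
#   for layer, scope, parent in traverse_model(model, depth=10000,
#                                              basic_blocks=(Linear, Dropout, StatelessEmbedding, T5LayerNorm, Embedding)):
#       print(scope)  # e.g. 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]'
#
# layerDict below packs the same traversal into a scope -> module mapping.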
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[str, nn.Module]:
    return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
def traverse_params_buffs(module: nn.Module, prefix: Optional[str] = None) -> Iterator[Tuple[Tensor, str]]:
    '''
    Iterate over a module's buffers and parameters, yielding (obj, obj_scope).

    Parameters:
    -----------
    module:
        the module to iterate over
    '''
    if prefix is None:
        prefix = type(module).__name__
    for param_name, param in module.named_parameters(recurse=False):
        param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
        yield param, param_scope
    for buffer_name, buffer in module.named_buffers(recurse=False):
        buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
        yield buffer, buffer_scope
    for name, sub_module in module.named_children():
        yield from traverse_params_buffs(sub_module, prefix + '/' + type(sub_module).__name__ + f'[{name}]')
def tensorDict(model: nn.Module) -> OrderedDict[str, Tensor]:
    return collections.OrderedDict((s, t) for (t, s) in traverse_params_buffs(model))
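# Example (sketch): tensorDict keys follow the same scope convention; e.g. the
# tied embedding weight that Partition0 below registers from its TENSORS list:
#
#   tensors = tensorDict(model)
#   shared = tensors['T5ForConditionalGeneration/Parameter[shared_embed_weight]']
#   isinstance(shared, nn.Parameter)  # True, so Partition0 registers it as p_0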
def op_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe():
    return dict(model_type='t5_stateless',
                model_name_or_path='t5-3b',
                do_lower_case=False,
                output_past=False,
                stateless_tied=True,
                explicitly_set_dict={'return_dict': False,
                                     'use_cache': False,
                                     'output_only': True,
                                     'output_attentions': False,
                                     'precompute_masks': False,
                                     'output_hidden_states': False},
                do_resize_token_embedding=True)
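# The options above are presumably consumed by an experiment runner outside
# this file. Illustratively (hypothetical consumer, standard transformers
# API), the explicitly_set_dict entries would be applied as config overrides:
#
#   opts = op_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe()
#   cfg = T5Config.from_pretrained(opts['model_name_or_path'])
#   for key, value in opts['explicitly_set_dict'].items():
#       setattr(cfg, key, value)  # e.g. use_cache=False, return_dict=False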
def create_pipeline_configuration(DEBUG=False, batch_size=2): config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Linear, Dropout, StatelessEmbedding, T5LayerNorm, Embedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([2, 512]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([2, 512]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([2, 512]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([2, 512]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/tuple::__getitem___319_0': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/prim::TupleConstruct_328_0': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/prim::TupleConstruct_328_1': {'shape': None, 'dtype': None, 'req_grad': False, 'is_batched': False, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([2, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/tuple::__getitem___319_0': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/prim::TupleConstruct_328_0': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/prim::TupleConstruct_328_1': {'shape': None, 'dtype': None, 'req_grad': False, 'is_batched': False, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': 
torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': 
torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([2, 512, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([2, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([2, 512, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_8': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([2, 512]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_8': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([2, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': 
{'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 
'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': 
{'labels': {'shape': torch.Size([2, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([2, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([2, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([2, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([2, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}} batch_dim = config['batch_dim'] for d in chain(config['model_inputs'].values(), config['model_outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) for s in config['stages'].values(): for d in chain(s['inputs'].values(), s['outputs'].values()): if d['is_batched']: shape = d['shape'] d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):])) return config
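# Sketch of a hypothetical driver (the real pipeline runner lives outside this
# file; this only shows the intended shape of the returned dict):
#
#   config = create_pipeline_configuration(DEBUG=False, batch_size=8)
#   layers = layerDict(model, depth=config['depth'], basic_blocks=config['basic_blocks'])
#   tensors = tensorDict(model)
#   stages = {idx: spec['stage_cls'](layers, tensors, device=spec['devices'][0])
#             for idx, spec in config['stages'].items()}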
class Partition0(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]',
        'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Embedding[relative_attention_bias]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]',
    ]
    TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]']

    def __init__(self, layers, tensors, device='cuda:0'):
        super().__init__()
        # Register this partition's layers under short generated names l_0, l_1, ...
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register shared tensors as parameters (p_*) or buffers (b_*).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        # Maps generated names back to the original T5 state-dict names.
        self.lookup = {
            'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout',
            'l_2': 'encoder.block.0.layer.0.layer_norm', 'l_3': 'encoder.block.0.layer.0.SelfAttention.q',
            'l_4': 'encoder.block.0.layer.0.SelfAttention.k', 'l_5': 'encoder.block.0.layer.0.SelfAttention.v',
            'l_6': 'encoder.block.0.layer.0.SelfAttention.relative_attention_bias',
            'l_7': 'encoder.block.0.layer.0.SelfAttention.o', 'l_8': 'encoder.block.0.layer.0.dropout',
            'l_9': 'encoder.block.0.layer.1.layer_norm', 'l_10': 'encoder.block.0.layer.1.DenseReluDense.wi',
            'l_11': 'encoder.block.0.layer.1.DenseReluDense.dropout', 'l_12': 'encoder.block.0.layer.1.DenseReluDense.wo',
            'l_13': 'encoder.block.0.layer.1.dropout', 'l_14': 'encoder.block.1.layer.0.layer_norm',
            'l_15': 'encoder.block.1.layer.0.SelfAttention.q', 'l_16': 'encoder.block.1.layer.0.SelfAttention.k',
            'l_17': 'encoder.block.1.layer.0.SelfAttention.v', 'l_18': 'encoder.block.1.layer.0.SelfAttention.o',
            'l_19': 'encoder.block.1.layer.0.dropout', 'l_20': 'encoder.block.1.layer.1.layer_norm',
            'l_21': 'encoder.block.1.layer.1.DenseReluDense.wi', 'l_22': 'encoder.block.1.layer.1.DenseReluDense.dropout',
            'l_23': 'encoder.block.1.layer.1.DenseReluDense.wo', 'l_24': 'encoder.block.1.layer.1.dropout',
            'l_25': 'encoder.block.2.layer.0.layer_norm', 'l_26': 'encoder.block.2.layer.0.SelfAttention.q',
            'l_27': 'encoder.block.2.layer.0.SelfAttention.k', 'l_28': 'encoder.block.2.layer.0.SelfAttention.v',
            'l_29': 'encoder.block.2.layer.0.SelfAttention.o', 'l_30': 'encoder.block.2.layer.0.dropout',
            'l_31': 'encoder.block.2.layer.1.layer_norm', 'l_32': 'encoder.block.2.layer.1.DenseReluDense.wi',
            'l_33': 'encoder.block.2.layer.1.DenseReluDense.dropout', 'l_34': 'encoder.block.2.layer.1.DenseReluDense.wo',
            'l_35': 'encoder.block.2.layer.1.dropout', 'l_36': 'decoder.embed_tokens',
            'p_0': 'shared_embed_weight',
        }
        self.to(self.device)

    def forward(self, *args):
        attention_mask, decoder_attention_mask, decoder_input_ids, input_ids = unflatten(args, self.input_structure)
        t_0 = decoder_input_ids.size()
        # Encoder token embedding (the tied weight is passed explicitly as self.p_0).
        t_1 = input_ids.size()
        t_1 = t_1[-1]
        t_1 = input_ids.view(-1, t_1)
        t_1 = self.l_0(self.p_0, t_1)
        t_1 = self.l_1(t_1)
        # Extended encoder attention mask: 0.0 where attended, -10000.0 where masked.
        t_2 = attention_mask[:, None, None, :]
        t_2 = t_2.to(dtype=torch.float32)
        t_2 = 1.0 - t_2
        t_2 = t_2 * (-10000.0)
        # Encoder block 0: self-attention (32 heads, d_kv=128, d_model=4096).
        t_3 = self.l_2(t_1)
        t_4 = self.l_3(t_3)
        t_5 = self.l_4(t_3)
        t_6 = self.l_5(t_3)
        t_3 = t_3.shape
        t_3 = t_3[:2]
        t_7 = t_3[0]
        t_3 = t_3[1]
        t_4 = t_4.view(t_7, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_7, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_7, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_4, t_5)
        # Relative position buckets (bidirectional, num_buckets=32, max_distance=128).
        t_4 = torch.arange(t_3, dtype=torch.int64, device=self.device)
        t_4 = t_4[:, None]
        t_3 = torch.arange(t_3, dtype=torch.int64, device=self.device)
        t_3 = t_3[None, :]
        t_4 = t_3 - t_4
        t_3 = torch.abs(t_4)
        t_4 = t_4 > 0
        t_4 = t_4.to(torch.int64)
        t_4 = t_4 * 16
        t_4 = 0 + t_4
        t_8 = t_3.float()
        t_9 = t_3 < 8
        t_8 = t_8 / 8
        t_8 = torch.log(t_8)
        t_10 = math.log(16.0)
        t_10 = t_8 / t_10
        t_10 = t_10 * 8
        t_10 = t_10.to(torch.int64)
        t_10 = 8 + t_10
        t_8 = torch.full_like(t_10, 15, device=self.device)
        t_8 = torch.min(t_10, t_8)
        t_8 = torch.where(t_9, t_3, t_8)
        t_4 += t_8
        t_8 = t_4
        t_8 = t_8.to(self.device)
        t_8 = self.l_6(t_8)
        t_8 = t_8.permute([2, 0, 1])
        t_8 = t_8.unsqueeze(0)
        t_2 = t_8 + t_2
        t_5 += t_2
        t_8 = t_5.float()
        t_8 = torch.nn.functional.softmax(t_8, dim=-1)
        t_5 = t_8.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_7 = t_6.view(t_7, -1, 4096)
        t_7 = self.l_7(t_7)
        t_6 = self.l_8(t_7)
        t_6 = t_1 + t_6
        t_2 = (t_7, None, t_2)
        t_7 = t_2[0]
        t_6 = (t_6,)
        t_2 = t_2[1:]
        t_2 = t_6 + t_2
        # Encoder block 0: feed-forward.
        t_6 = t_2[:2]
        t_1 = t_6[0]
        t_5 = self.l_9(t_1)
        t_6 = t_6[1]
        t_2 = t_2[2:]
        t_5 = self.l_10(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_11(t_5)
        t_5 = self.l_12(t_5)
        t_5 = self.l_13(t_5)
        t_5 = t_1 + t_5
        t_6 = (t_5, t_6)
        t_2 = t_6 + t_2
        # Encoder block 1: self-attention (reuses the shared position bias carried in t_2).
        t_6 = t_2[:2]
        t_6 = t_6[0]
        t_5 = self.l_14(t_6)
        t_2 = t_2[2]
        t_1 = self.l_15(t_5)
        t_8 = self.l_16(t_5)
        t_4 = self.l_17(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_8 = t_8.view(t_5, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_4 = t_4.view(t_5, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_1, t_8)
        t_8 += t_2
        t_1 = t_8.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_8 = t_1.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training)
        t_4 = torch.matmul(t_8, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_5 = t_4.view(t_5, -1, 4096)
        t_5 = self.l_18(t_5)
        t_4 = self.l_19(t_5)
        t_4 = t_6 + t_4
        t_2 = (t_5, None, t_2)
        t_5 = t_2[0]
        t_4 = (t_4,)
        t_2 = t_2[1:]
        t_2 = t_4 + t_2
        # Encoder block 1: feed-forward.
        t_4 = t_2[:2]
        t_6 = t_4[0]
        t_8 = self.l_20(t_6)
        t_4 = t_4[1]
        t_2 = t_2[2:]
        t_8 = self.l_21(t_8)
        t_8 = torch.nn.functional.relu(t_8)
        t_8 = self.l_22(t_8)
        t_8 = self.l_23(t_8)
        t_8 = self.l_24(t_8)
        t_8 = t_6 + t_8
        t_4 = (t_8, t_4)
        t_2 = t_4 + t_2
        # Encoder block 2: self-attention.
        t_4 = t_2[:2]
        t_4 = t_4[0]
        t_8 = self.l_25(t_4)
        t_2 = t_2[2]
        t_6 = self.l_26(t_8)
        t_1 = self.l_27(t_8)
        t_3 = self.l_28(t_8)
        t_8 = t_8.shape
        t_8 = t_8[:2]
        t_8 = t_8[0]
        t_6 = t_6.view(t_8, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_1 = t_1.view(t_8, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_3 = t_3.view(t_8, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_6, t_1)
        t_1 += t_2
        t_6 = t_1.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1)
        t_1 = t_6.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_3 = torch.matmul(t_1, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_8 = t_3.view(t_8, -1, 4096)
        t_8 = self.l_29(t_8)
        t_3 = self.l_30(t_8)
        t_3 = t_4 + t_3
        t_2 = (t_8, None, t_2)
        t_8 = t_2[0]
        t_3 = (t_3,)
        t_2 = t_2[1:]
        t_2 = t_3 + t_2
        # Encoder block 2: feed-forward.
        t_3 = t_2[:2]
        t_4 = t_3[0]
        t_1 = self.l_31(t_4)
        t_3 = t_3[1]
        t_2 = t_2[2:]
        t_1 = self.l_32(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_33(t_1)
        t_1 = self.l_34(t_1)
        t_1 = self.l_35(t_1)
        t_1 = t_4 + t_1
        t_3 = (t_1, t_3)
        # Decoder token embedding and extended causal decoder mask.
        t_1 = t_0[-1]
        t_1 = decoder_input_ids.view(-1, t_1)
        t_1 = self.l_36(self.p_0, t_1)
        t_4 = t_0[0]
        t_0 = t_0[1]
        t_6 = torch.arange(t_0, device=self.device)
        t_9 = t_6[None, None, :]
        t_0 = t_9.repeat(t_4, t_0, 1)
        t_6 = t_6[None, :, None]
        t_6 = t_0 <= t_6
        t_0 = decoder_attention_mask.dtype
        t_0 = t_6.to(t_0)
        t_0 = t_0[:, None, :, :]
        t_6 = decoder_attention_mask[:, None, None, :]
        t_6 = t_0 * t_6
        t_6 = t_6.to(dtype=torch.float32)
        t_6 = 1.0 - t_6
        t_6 = t_6 * (-10000.0)
        return list(flatten((t_2, t_3, t_1, t_6)))

    # Delegate to the module-level helpers, which translate the generated
    # l_*/p_*/b_* names to and from the original T5 state-dict names.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
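
# Reference sketch (not generated code): the inlined integer arithmetic in
# Partition0.forward above is T5's bidirectional relative-position bucketing,
# written out here assuming the hyperparameters that code hard-codes
# (num_buckets=32, max_distance=128). Kept only as documentation of what the
# t_3/t_4/t_8/t_9/t_10 temporaries compute.
def _relative_position_bucket_sketch(relative_position, num_buckets=32, max_distance=128):
    # Bidirectional split: the sign of the relative position selects the half.
    ret = (relative_position > 0).to(torch.int64) * (num_buckets // 2)
    n = torch.abs(relative_position)
    max_exact = (num_buckets // 2) // 2  # == 8, the `< 8` threshold above
    is_small = n < max_exact
    # Distances beyond max_exact fall into logarithmically spaced buckets.
    val_if_large = max_exact + (
        torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * max_exact
    ).to(torch.int64)
    val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets // 2 - 1))
    return ret + torch.where(is_small, n, val_if_large)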
class Partition1(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [(1,), (1, 1)]
        self.lookup = {
            'l_0': 'encoder.block.3.layer.0.layer_norm', 'l_1': 'encoder.block.3.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.3.layer.0.SelfAttention.k', 'l_3': 'encoder.block.3.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.3.layer.0.SelfAttention.o', 'l_5': 'encoder.block.3.layer.0.dropout',
            'l_6': 'encoder.block.3.layer.1.layer_norm', 'l_7': 'encoder.block.3.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.3.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.3.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.3.layer.1.dropout', 'l_11': 'encoder.block.4.layer.0.layer_norm',
            'l_12': 'encoder.block.4.layer.0.SelfAttention.q', 'l_13': 'encoder.block.4.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.4.layer.0.SelfAttention.v', 'l_15': 'encoder.block.4.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.4.layer.0.dropout', 'l_17': 'encoder.block.4.layer.1.layer_norm',
            'l_18': 'encoder.block.4.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.4.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.4.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.4.layer.1.dropout',
            'l_22': 'encoder.block.5.layer.0.layer_norm', 'l_23': 'encoder.block.5.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.5.layer.0.SelfAttention.k', 'l_25': 'encoder.block.5.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.5.layer.0.SelfAttention.o', 'l_27': 'encoder.block.5.layer.0.dropout',
            'l_28': 'encoder.block.5.layer.1.layer_norm', 'l_29': 'encoder.block.5.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.5.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.5.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.5.layer.1.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        t_0 = x1 + x0
        t_1 = t_0[:2]
        t_1 = t_1[0]
        t_2 = self.l_0(t_1)
        t_0 = t_0[2]
        t_3 = self.l_1(t_2)
        t_4 = self.l_2(t_2)
        t_5 = self.l_3(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_3 = t_3.view(t_2, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_3, t_4)
        t_4 += t_0
        t_3 = t_4.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_4 = t_3.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_4(t_2)
        t_5 = self.l_5(t_2)
        t_5 = t_1 + t_5
        t_0 = (t_2, None, t_0)
        t_2 = t_0[0]
        t_5 = (t_5,)
        t_0 = t_0[1:]
        t_0 = t_5 + t_0
        t_5 = t_0[:2]
        t_1 = t_5[0]
        t_4 = self.l_6(t_1)
        t_5 = t_5[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_5 = (t_4, t_5)
        t_0 = t_5 + t_0
        t_5 = t_0[:2]
        t_5 = t_5[0]
        t_4 = self.l_11(t_5)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_3 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_3 = t_3.view(t_4, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_3 = t_3.transpose(3, 2)
        t_3 = torch.matmul(t_1, t_3)
        t_3 += t_0
        t_1 = t_3.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_3 = t_1.type_as(t_3)
        t_3 = torch.nn.functional.dropout(t_3, p=0.1, training=self.training)
        t_6 = torch.matmul(t_3, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_5 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_5 = t_6[0]
        t_3 = self.l_17(t_5)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_3 = self.l_18(t_3)
        t_3 = torch.nn.functional.relu(t_3)
        t_3 = self.l_19(t_3)
        t_3 = self.l_20(t_3)
        t_3 = self.l_21(t_3)
        t_3 = t_5 + t_3
        t_6 = (t_3, t_6)
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_3 = self.l_22(t_6)
        t_0 = t_0[2]
        t_5 = self.l_23(t_3)
        t_1 = self.l_24(t_3)
        t_7 = self.l_25(t_3)
        t_3 = t_3.shape
        t_3 = t_3[:2]
        t_3 = t_3[0]
        t_5 = t_5.view(t_3, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_1 = t_1.view(t_3, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_3, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_5, t_1)
        t_1 += t_0
        t_5 = t_1.float()
        t_5 = torch.nn.functional.softmax(t_5, dim=-1)
        t_1 = t_5.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_3 = t_7.view(t_3, -1, 4096)
        t_3 = self.l_26(t_3)
        t_7 = self.l_27(t_3)
        t_7 = t_6 + t_7
        t_0 = (t_3, None, t_0)
        t_3 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
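
# Note on `input_structure` (illustrative; uses the module-level `unflatten`):
# each partition's flat positional args are regrouped to the nesting its
# forward expects. For Partition1's structure [(1,), (1, 1)]:
#
#   x0, x1 = unflatten(('a', 'b', 'c'), [(1,), (1, 1)])
#   # x0 == ('a',), x1 == ('b', 'c'); `x1 + x0` then rebuilds the 3-tuple that
#   # the generated block code slices (hidden states first, attention bias last).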
class Partition2(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {
            'l_0': 'encoder.block.6.layer.0.layer_norm', 'l_1': 'encoder.block.6.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.6.layer.0.SelfAttention.k', 'l_3': 'encoder.block.6.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.6.layer.0.SelfAttention.o', 'l_5': 'encoder.block.6.layer.0.dropout',
            'l_6': 'encoder.block.6.layer.1.layer_norm', 'l_7': 'encoder.block.6.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.6.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.6.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.6.layer.1.dropout', 'l_11': 'encoder.block.7.layer.0.layer_norm',
            'l_12': 'encoder.block.7.layer.0.SelfAttention.q', 'l_13': 'encoder.block.7.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.7.layer.0.SelfAttention.v', 'l_15': 'encoder.block.7.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.7.layer.0.dropout', 'l_17': 'encoder.block.7.layer.1.layer_norm',
            'l_18': 'encoder.block.7.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.7.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.7.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.7.layer.1.dropout',
            'l_22': 'encoder.block.8.layer.0.layer_norm', 'l_23': 'encoder.block.8.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.8.layer.0.SelfAttention.k', 'l_25': 'encoder.block.8.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.8.layer.0.SelfAttention.o', 'l_27': 'encoder.block.8.layer.0.dropout',
            'l_28': 'encoder.block.8.layer.1.layer_norm', 'l_29': 'encoder.block.8.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.8.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.8.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.8.layer.1.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = x0 + t_3
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[1:]
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_3 = (t_4, t_3)
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_3 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = t_3 + t_5
        t_6 = (t_5, t_6)
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = t_6 + t_7
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
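
# Illustrative stage-to-stage glue (an assumption about the surrounding
# pipeline runtime, not part of the generated file): Partition2 through
# Partition5 all consume and produce the flat pair
# (hidden_states, attention_bias), so two consecutive stages can be chained
# by hand, e.g.:
#
#   outs = partition2(*inputs)                                   # tensors on cuda:2
#   outs = partition3(*[t.to(partition3.device) for t in outs])  # continue on cuda:3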
class Partition3(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:3'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {
            'l_0': 'encoder.block.9.layer.0.layer_norm', 'l_1': 'encoder.block.9.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.9.layer.0.SelfAttention.k', 'l_3': 'encoder.block.9.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.9.layer.0.SelfAttention.o', 'l_5': 'encoder.block.9.layer.0.dropout',
            'l_6': 'encoder.block.9.layer.1.layer_norm', 'l_7': 'encoder.block.9.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.9.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.9.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.9.layer.1.dropout', 'l_11': 'encoder.block.10.layer.0.layer_norm',
            'l_12': 'encoder.block.10.layer.0.SelfAttention.q', 'l_13': 'encoder.block.10.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.10.layer.0.SelfAttention.v', 'l_15': 'encoder.block.10.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.10.layer.0.dropout', 'l_17': 'encoder.block.10.layer.1.layer_norm',
            'l_18': 'encoder.block.10.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.10.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.10.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.10.layer.1.dropout',
            'l_22': 'encoder.block.11.layer.0.layer_norm', 'l_23': 'encoder.block.11.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.11.layer.0.SelfAttention.k', 'l_25': 'encoder.block.11.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.11.layer.0.SelfAttention.o', 'l_27': 'encoder.block.11.layer.0.dropout',
            'l_28': 'encoder.block.11.layer.1.layer_norm', 'l_29': 'encoder.block.11.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.11.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.11.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.11.layer.1.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = x0 + t_3
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[1:]
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_3 = (t_4, t_3)
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_3 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = t_3 + t_5
        t_6 = (t_5, t_6)
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = t_6 + t_7
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition4(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:4'):
        super().__init__()
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        self.lookup = {
            'l_0': 'encoder.block.12.layer.0.layer_norm', 'l_1': 'encoder.block.12.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.12.layer.0.SelfAttention.k', 'l_3': 'encoder.block.12.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.12.layer.0.SelfAttention.o', 'l_5': 'encoder.block.12.layer.0.dropout',
            'l_6': 'encoder.block.12.layer.1.layer_norm', 'l_7': 'encoder.block.12.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.12.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.12.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.12.layer.1.dropout', 'l_11': 'encoder.block.13.layer.0.layer_norm',
            'l_12': 'encoder.block.13.layer.0.SelfAttention.q', 'l_13': 'encoder.block.13.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.13.layer.0.SelfAttention.v', 'l_15': 'encoder.block.13.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.13.layer.0.dropout', 'l_17': 'encoder.block.13.layer.1.layer_norm',
            'l_18': 'encoder.block.13.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.13.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.13.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.13.layer.1.dropout',
            'l_22': 'encoder.block.14.layer.0.layer_norm', 'l_23': 'encoder.block.14.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.14.layer.0.SelfAttention.k', 'l_25': 'encoder.block.14.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.14.layer.0.SelfAttention.o', 'l_27': 'encoder.block.14.layer.0.dropout',
            'l_28': 'encoder.block.14.layer.1.layer_norm', 'l_29': 'encoder.block.14.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.14.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.14.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.14.layer.1.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        x0, x1 = unflatten(args, self.input_structure)
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = x0 + t_3
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[1:]
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_3 = (t_4, t_3)
        t_0 = t_3 + t_0
        t_3 = t_0[:2]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_3 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = t_3 + t_5
        t_6 = (t_5, t_6)
        t_0 = t_6 + t_0
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = t_6 + t_7
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class Partition5(nn.Module):
    # Pipeline stage 5 (cuda:5): T5 encoder blocks 15-17.
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
    ]

    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:5'):
        super().__init__()
        # Attach this stage's traced layers under stable names l_0 .. l_32.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors (none for this stage) as parameters or buffers.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        # Maps partition-local module names back to the original model's state_dict keys.
        self.lookup = {
            'l_0': 'encoder.block.15.layer.0.layer_norm', 'l_1': 'encoder.block.15.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.15.layer.0.SelfAttention.k', 'l_3': 'encoder.block.15.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.15.layer.0.SelfAttention.o', 'l_5': 'encoder.block.15.layer.0.dropout',
            'l_6': 'encoder.block.15.layer.1.layer_norm', 'l_7': 'encoder.block.15.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.15.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.15.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.15.layer.1.dropout', 'l_11': 'encoder.block.16.layer.0.layer_norm',
            'l_12': 'encoder.block.16.layer.0.SelfAttention.q', 'l_13': 'encoder.block.16.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.16.layer.0.SelfAttention.v', 'l_15': 'encoder.block.16.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.16.layer.0.dropout', 'l_17': 'encoder.block.16.layer.1.layer_norm',
            'l_18': 'encoder.block.16.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.16.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.16.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.16.layer.1.dropout',
            'l_22': 'encoder.block.17.layer.0.layer_norm', 'l_23': 'encoder.block.17.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.17.layer.0.SelfAttention.k', 'l_25': 'encoder.block.17.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.17.layer.0.SelfAttention.o', 'l_27': 'encoder.block.17.layer.0.dropout',
            'l_28': 'encoder.block.17.layer.1.layer_norm', 'l_29': 'encoder.block.17.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.17.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.17.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.17.layer.1.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        # x0: hidden states, x1: position bias (added to the attention scores)
        x0, x1 = unflatten(args, self.input_structure)
        # block 15: self-attention
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = x0 + t_3
        # the traced code threads (hidden, present_key_value, position_bias) as tuples
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[1:]
        t_0 = t_3 + t_0
        # block 15: feed-forward
        t_3 = t_0[:2]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_3 = (t_4, t_3)
        t_0 = t_3 + t_0
        # block 16: self-attention
        t_3 = t_0[:2]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_3 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        # block 16: feed-forward
        t_6 = t_0[:2]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = t_3 + t_5
        t_6 = (t_5, t_6)
        t_0 = t_6 + t_0
        # block 17: self-attention
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = t_6 + t_7
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        # block 17: feed-forward
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_0 = t_0[2]
        return list(flatten((t_7, t_0)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
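# The generated forward() bodies in this file all inline the same T5
# self-attention pattern: project q/k/v, split into 32 heads of size 128
# (32 * 128 == 4096), add the position bias to the scores, softmax in float32,
# dropout, weighted sum, then merge the heads again. A readable restatement of
# that pattern for reference -- the function and argument names below are
# illustrative only and are not used by the generated partitions:
def _t5_self_attention_sketch(hidden, position_bias, q, k, v, o, training=False):
    batch = hidden.shape[0]
    q_h = q(hidden).view(batch, -1, 32, 128).transpose(1, 2)
    k_h = k(hidden).view(batch, -1, 32, 128).transpose(1, 2)
    v_h = v(hidden).view(batch, -1, 32, 128).transpose(1, 2)
    scores = torch.matmul(q_h, k_h.transpose(3, 2)) + position_bias
    # T5 computes the softmax in float32 and casts back, exactly as the
    # partitions do with .float() / .type_as()
    weights = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
    weights = torch.nn.functional.dropout(weights, p=0.1, training=training)
    context = torch.matmul(weights, v_h).transpose(1, 2).contiguous()
    return o(context.view(batch, -1, 4096))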
class Partition6(nn.Module):
    # Pipeline stage 6 (cuda:6): T5 encoder blocks 18-20, plus the layer norm
    # and query projection of block 21 (block 21 continues in Partition7).
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
    ]

    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:6'):
        super().__init__()
        # Attach this stage's traced layers under stable names l_0 .. l_34.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors (none for this stage) as parameters or buffers.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1]
        # Maps partition-local module names back to the original model's state_dict keys.
        self.lookup = {
            'l_0': 'encoder.block.18.layer.0.layer_norm', 'l_1': 'encoder.block.18.layer.0.SelfAttention.q',
            'l_2': 'encoder.block.18.layer.0.SelfAttention.k', 'l_3': 'encoder.block.18.layer.0.SelfAttention.v',
            'l_4': 'encoder.block.18.layer.0.SelfAttention.o', 'l_5': 'encoder.block.18.layer.0.dropout',
            'l_6': 'encoder.block.18.layer.1.layer_norm', 'l_7': 'encoder.block.18.layer.1.DenseReluDense.wi',
            'l_8': 'encoder.block.18.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.18.layer.1.DenseReluDense.wo',
            'l_10': 'encoder.block.18.layer.1.dropout', 'l_11': 'encoder.block.19.layer.0.layer_norm',
            'l_12': 'encoder.block.19.layer.0.SelfAttention.q', 'l_13': 'encoder.block.19.layer.0.SelfAttention.k',
            'l_14': 'encoder.block.19.layer.0.SelfAttention.v', 'l_15': 'encoder.block.19.layer.0.SelfAttention.o',
            'l_16': 'encoder.block.19.layer.0.dropout', 'l_17': 'encoder.block.19.layer.1.layer_norm',
            'l_18': 'encoder.block.19.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.19.layer.1.DenseReluDense.dropout',
            'l_20': 'encoder.block.19.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.19.layer.1.dropout',
            'l_22': 'encoder.block.20.layer.0.layer_norm', 'l_23': 'encoder.block.20.layer.0.SelfAttention.q',
            'l_24': 'encoder.block.20.layer.0.SelfAttention.k', 'l_25': 'encoder.block.20.layer.0.SelfAttention.v',
            'l_26': 'encoder.block.20.layer.0.SelfAttention.o', 'l_27': 'encoder.block.20.layer.0.dropout',
            'l_28': 'encoder.block.20.layer.1.layer_norm', 'l_29': 'encoder.block.20.layer.1.DenseReluDense.wi',
            'l_30': 'encoder.block.20.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.20.layer.1.DenseReluDense.wo',
            'l_32': 'encoder.block.20.layer.1.dropout', 'l_33': 'encoder.block.21.layer.0.layer_norm',
            'l_34': 'encoder.block.21.layer.0.SelfAttention.q',
        }
        self.to(self.device)

    def forward(self, *args):
        # x0: hidden states, x1: position bias (added to the attention scores)
        x0, x1 = unflatten(args, self.input_structure)
        # block 18: self-attention
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_1 = t_1.view(t_0, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        t_3 = x0 + t_3
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[1:]
        t_0 = t_3 + t_0
        # block 18: feed-forward
        t_3 = t_0[:2]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[2:]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        t_4 = t_1 + t_4
        t_3 = (t_4, t_3)
        t_0 = t_3 + t_0
        # block 19: self-attention
        t_3 = t_0[:2]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = t_3 + t_6
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[1:]
        t_0 = t_6 + t_0
        # block 19: feed-forward
        t_6 = t_0[:2]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[2:]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = t_3 + t_5
        t_6 = (t_5, t_6)
        t_0 = t_6 + t_0
        # block 20: self-attention
        t_6 = t_0[:2]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = t_6 + t_7
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[1:]
        t_0 = t_7 + t_0
        # block 20: feed-forward
        t_7 = t_0[:2]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[2:]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = t_6 + t_1
        t_7 = (t_1, t_7)
        t_0 = t_7 + t_0
        # start of block 21: layer norm and query projection; the rest of the
        # block runs in Partition7
        t_7 = t_0[:2]
        t_7 = t_7[0]
        t_1 = self.l_33(t_7)
        t_0 = t_0[2]
        t_6 = self.l_34(t_1)
        return list(flatten((t_7, t_1, t_0, t_6)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
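# Partitions communicate through flat tensor lists: each forward() returns
# list(flatten(...)) and the next stage rebuilds its inputs with
# unflatten(args, self.input_structure), where e.g. [1, 1] simply means "two
# leaf tensors". A minimal round-trip check of that contract, using the
# flatten / unflatten helpers defined at the top of this file (the tensors
# here are dummies for illustration):
def _structure_roundtrip_demo():
    a = torch.zeros(2, 3)
    b = torch.zeros(2, 3)
    flat = list(flatten((a, b)))
    x0, x1 = unflatten(flat, [1, 1])
    assert x0 is a and x1 is b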
class Partition7(nn.Module):
    # Pipeline stage 7 (cuda:7): rest of encoder block 21 (k/v/o and FF),
    # encoder blocks 22-23, and the encoder's final layer norm and dropout.
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]',
        'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]',
    ]

    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:7'):
        super().__init__()
        # Attach this stage's traced layers under stable names l_0 .. l_32.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors (none for this stage) as parameters or buffers.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        # Maps partition-local module names back to the original model's state_dict keys.
        self.lookup = {
            'l_0': 'encoder.block.21.layer.0.SelfAttention.k', 'l_1': 'encoder.block.21.layer.0.SelfAttention.v',
            'l_2': 'encoder.block.21.layer.0.SelfAttention.o', 'l_3': 'encoder.block.21.layer.0.dropout',
            'l_4': 'encoder.block.21.layer.1.layer_norm', 'l_5': 'encoder.block.21.layer.1.DenseReluDense.wi',
            'l_6': 'encoder.block.21.layer.1.DenseReluDense.dropout', 'l_7': 'encoder.block.21.layer.1.DenseReluDense.wo',
            'l_8': 'encoder.block.21.layer.1.dropout', 'l_9': 'encoder.block.22.layer.0.layer_norm',
            'l_10': 'encoder.block.22.layer.0.SelfAttention.q', 'l_11': 'encoder.block.22.layer.0.SelfAttention.k',
            'l_12': 'encoder.block.22.layer.0.SelfAttention.v', 'l_13': 'encoder.block.22.layer.0.SelfAttention.o',
            'l_14': 'encoder.block.22.layer.0.dropout', 'l_15': 'encoder.block.22.layer.1.layer_norm',
            'l_16': 'encoder.block.22.layer.1.DenseReluDense.wi', 'l_17': 'encoder.block.22.layer.1.DenseReluDense.dropout',
            'l_18': 'encoder.block.22.layer.1.DenseReluDense.wo', 'l_19': 'encoder.block.22.layer.1.dropout',
            'l_20': 'encoder.block.23.layer.0.layer_norm', 'l_21': 'encoder.block.23.layer.0.SelfAttention.q',
            'l_22': 'encoder.block.23.layer.0.SelfAttention.k', 'l_23': 'encoder.block.23.layer.0.SelfAttention.v',
            'l_24': 'encoder.block.23.layer.0.SelfAttention.o', 'l_25': 'encoder.block.23.layer.0.dropout',
            'l_26': 'encoder.block.23.layer.1.layer_norm', 'l_27': 'encoder.block.23.layer.1.DenseReluDense.wi',
            'l_28': 'encoder.block.23.layer.1.DenseReluDense.dropout', 'l_29': 'encoder.block.23.layer.1.DenseReluDense.wo',
            'l_30': 'encoder.block.23.layer.1.dropout', 'l_31': 'encoder.final_layer_norm',
            'l_32': 'encoder.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        # x0: residual hidden states, x1: layer-normed hidden states from
        # Partition6, x2: position bias, x3: block-21 query projection
        # computed at the end of Partition6
        x0, x1, x2, x3 = unflatten(args, self.input_structure)
        # block 21 (continued): self-attention k/v/o
        t_0 = self.l_0(x1)
        t_1 = self.l_1(x1)
        t_2 = x1.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_3 = x3.view(t_2, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_0 = t_0.view(t_2, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_3, t_0)
        t_0 += x2
        t_3 = t_0.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_0 = t_3.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_2 = t_1.view(t_2, -1, 4096)
        t_2 = self.l_2(t_2)
        t_1 = self.l_3(t_2)
        t_1 = x0 + t_1
        t_2 = (t_2, None, x2)
        t_0 = t_2[0]
        t_1 = (t_1,)
        t_2 = t_2[1:]
        t_2 = t_1 + t_2
        # block 21: feed-forward
        t_1 = t_2[:2]
        t_3 = t_1[0]
        t_4 = self.l_4(t_3)
        t_1 = t_1[1]
        t_2 = t_2[2:]
        t_4 = self.l_5(t_4)
        t_4 = torch.nn.functional.relu(t_4)
        t_4 = self.l_6(t_4)
        t_4 = self.l_7(t_4)
        t_4 = self.l_8(t_4)
        t_4 = t_3 + t_4
        t_1 = (t_4, t_1)
        t_2 = t_1 + t_2
        # block 22: self-attention
        t_1 = t_2[:2]
        t_1 = t_1[0]
        t_4 = self.l_9(t_1)
        t_2 = t_2[2]
        t_3 = self.l_10(t_4)
        t_5 = self.l_11(t_4)
        t_6 = self.l_12(t_4)
        t_4 = t_4.shape
        t_4 = t_4[:2]
        t_4 = t_4[0]
        t_3 = t_3.view(t_4, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_5 = t_5.view(t_4, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_3, t_5)
        t_5 += t_2
        t_3 = t_5.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=-1)
        t_5 = t_3.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, -1, 4096)
        t_4 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_6 = t_1 + t_6
        t_2 = (t_4, None, t_2)
        t_4 = t_2[0]
        t_6 = (t_6,)
        t_2 = t_2[1:]
        t_2 = t_6 + t_2
        # block 22: feed-forward
        t_6 = t_2[:2]
        t_1 = t_6[0]
        t_5 = self.l_15(t_1)
        t_6 = t_6[1]
        t_2 = t_2[2:]
        t_5 = self.l_16(t_5)
        t_5 = torch.nn.functional.relu(t_5)
        t_5 = self.l_17(t_5)
        t_5 = self.l_18(t_5)
        t_5 = self.l_19(t_5)
        t_5 = t_1 + t_5
        t_6 = (t_5, t_6)
        t_2 = t_6 + t_2
        # block 23: self-attention
        t_6 = t_2[:2]
        t_6 = t_6[0]
        t_5 = self.l_20(t_6)
        t_2 = t_2[2]
        t_1 = self.l_21(t_5)
        t_3 = self.l_22(t_5)
        t_7 = self.l_23(t_5)
        t_5 = t_5.shape
        t_5 = t_5[:2]
        t_5 = t_5[0]
        t_1 = t_1.view(t_5, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_3 = t_3.view(t_5, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_7 = t_7.view(t_5, -1, 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_3 = t_3.transpose(3, 2)
        t_3 = torch.matmul(t_1, t_3)
        t_3 += t_2
        t_1 = t_3.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_3 = t_1.type_as(t_3)
        t_3 = torch.nn.functional.dropout(t_3, p=0.1, training=self.training)
        t_7 = torch.matmul(t_3, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, -1, 4096)
        t_5 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_7 = t_6 + t_7
        t_2 = (t_5, None, t_2)
        t_5 = t_2[0]
        t_7 = (t_7,)
        t_2 = t_2[1:]
        t_2 = t_7 + t_2
        # block 23: feed-forward
        t_7 = t_2[:2]
        t_6 = t_7[0]
        t_3 = self.l_26(t_6)
        t_7 = t_7[1]
        t_2 = t_2[2:]
        t_3 = self.l_27(t_3)
        t_3 = torch.nn.functional.relu(t_3)
        t_3 = self.l_28(t_3)
        t_3 = self.l_29(t_3)
        t_3 = self.l_30(t_3)
        t_3 = t_6 + t_3
        t_7 = (t_3, t_7)
        t_2 = t_7 + t_2
        # encoder tail: final layer norm + dropout
        t_7 = t_2[:2]
        t_7 = t_7[0]
        t_7 = self.l_31(t_7)
        t_2 = t_2[2]
        t_7 = self.l_32(t_7)
        return (t_7,)

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
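# Partition7 closes the encoder (final layer norm + dropout) and returns a
# single tensor of encoder hidden states, which Partition8 consumes for the
# decoder's cross-attention. A hedged sketch of how consecutive stages could
# be chained within one process -- a real pipeline runner would instead
# overlap the stages over micro-batches; move_tensors is the helper defined
# at the top of this file, and the argument names are illustrative:
def _run_encoder_tail(p5, p6, p7, hidden, position_bias):
    outs = p5(*move_tensors([hidden, position_bias], p5.device))
    outs = p6(*move_tensors(outs, p6.device))
    outs = p7(*move_tensors(outs, p7.device))
    return outs[0]  # final encoder hidden states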
class Partition8(nn.Module):
    # Pipeline stage 8 (cuda:8): decoder embedding dropout and decoder blocks
    # 0-2 (block 0 holds the decoder's shared relative attention bias).
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Embedding[relative_attention_bias]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]',
    ]

    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:8'):
        super().__init__()
        # Attach this stage's traced layers under stable names l_0 .. l_52.
        for idx, layer_scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register free-standing tensors (none for this stage) as parameters or buffers.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        # Maps partition-local module names back to the original model's state_dict keys.
        self.lookup = {
            'l_0': 'decoder.dropout', 'l_1': 'decoder.block.0.layer.0.layer_norm',
            'l_2': 'decoder.block.0.layer.0.SelfAttention.q', 'l_3': 'decoder.block.0.layer.0.SelfAttention.k',
            'l_4': 'decoder.block.0.layer.0.SelfAttention.v', 'l_5': 'decoder.block.0.layer.0.SelfAttention.relative_attention_bias',
            'l_6': 'decoder.block.0.layer.0.SelfAttention.o', 'l_7': 'decoder.block.0.layer.0.dropout',
            'l_8': 'decoder.block.0.layer.1.layer_norm', 'l_9': 'decoder.block.0.layer.1.EncDecAttention.q',
            'l_10': 'decoder.block.0.layer.1.EncDecAttention.k', 'l_11': 'decoder.block.0.layer.1.EncDecAttention.v',
            'l_12': 'decoder.block.0.layer.1.EncDecAttention.o', 'l_13': 'decoder.block.0.layer.1.dropout',
            'l_14': 'decoder.block.0.layer.2.layer_norm', 'l_15': 'decoder.block.0.layer.2.DenseReluDense.wi',
            'l_16': 'decoder.block.0.layer.2.DenseReluDense.dropout', 'l_17': 'decoder.block.0.layer.2.DenseReluDense.wo',
            'l_18': 'decoder.block.0.layer.2.dropout', 'l_19': 'decoder.block.1.layer.0.layer_norm',
            'l_20': 'decoder.block.1.layer.0.SelfAttention.q', 'l_21': 'decoder.block.1.layer.0.SelfAttention.k',
            'l_22': 'decoder.block.1.layer.0.SelfAttention.v', 'l_23': 'decoder.block.1.layer.0.SelfAttention.o',
            'l_24': 'decoder.block.1.layer.0.dropout', 'l_25': 'decoder.block.1.layer.1.layer_norm',
            'l_26': 'decoder.block.1.layer.1.EncDecAttention.q', 'l_27': 'decoder.block.1.layer.1.EncDecAttention.k',
            'l_28': 'decoder.block.1.layer.1.EncDecAttention.v', 'l_29': 'decoder.block.1.layer.1.EncDecAttention.o',
            'l_30': 'decoder.block.1.layer.1.dropout', 'l_31': 'decoder.block.1.layer.2.layer_norm',
            'l_32': 'decoder.block.1.layer.2.DenseReluDense.wi', 'l_33': 'decoder.block.1.layer.2.DenseReluDense.dropout',
            'l_34': 'decoder.block.1.layer.2.DenseReluDense.wo', 'l_35': 'decoder.block.1.layer.2.dropout',
            'l_36': 'decoder.block.2.layer.0.layer_norm', 'l_37': 'decoder.block.2.layer.0.SelfAttention.q',
            'l_38': 'decoder.block.2.layer.0.SelfAttention.k', 'l_39': 'decoder.block.2.layer.0.SelfAttention.v',
            'l_40': 'decoder.block.2.layer.0.SelfAttention.o', 'l_41': 'decoder.block.2.layer.0.dropout',
            'l_42': 'decoder.block.2.layer.1.layer_norm', 'l_43': 'decoder.block.2.layer.1.EncDecAttention.q',
            'l_44': 'decoder.block.2.layer.1.EncDecAttention.k', 'l_45': 'decoder.block.2.layer.1.EncDecAttention.v',
            'l_46': 'decoder.block.2.layer.1.EncDecAttention.o', 'l_47': 'decoder.block.2.layer.1.dropout',
            'l_48': 'decoder.block.2.layer.2.layer_norm', 'l_49': 'decoder.block.2.layer.2.DenseReluDense.wi',
            'l_50': 'decoder.block.2.layer.2.DenseReluDense.dropout', 'l_51': 'decoder.block.2.layer.2.DenseReluDense.wo',
            'l_52': 'decoder.block.2.layer.2.dropout',
        }
        self.to(self.device)

    def forward(self, *args):
        # attention_mask: encoder attention mask; x0: encoder hidden states;
        # x1: decoder input embeddings; x2: decoder self-attention mask
        # (added to the relative position bias)
        attention_mask, x0, x1, x2 = unflatten(args, self.input_structure)
        # cross-attention key/value projections over the encoder states,
        # computed up front for blocks 0-2
        t_0 = self.l_10(x0)
        t_1 = self.l_11(x0)
        t_2 = self.l_27(x0)
        t_3 = self.l_28(x0)
        t_4 = self.l_44(x0)
        t_5 = self.l_45(x0)
        t_6 = self.l_0(x1)
        # extended attention mask: (1 - mask) * -1e9, broadcast to
        # (batch, 1, 1, src_len)
        t_7 = attention_mask[:, None, None, :]
        t_7 = t_7.to(dtype=torch.float32)
        t_7 = 1.0 - t_7
        t_7 = t_7 * -1000000000.0
        # block 0: self-attention
        t_8 = self.l_1(t_6)
        t_9 = self.l_2(t_8)
        t_10 = self.l_3(t_8)
        t_11 = self.l_4(t_8)
        t_8 = t_8.shape
        t_8 = t_8[:2]
        t_12 = t_8[0]
        t_8 = t_8[1]
        t_9 = t_9.view(t_12, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_10 = t_10.view(t_12, -1, 32, 128)
        t_10 = t_10.transpose(1, 2)
        t_11 = t_11.view(t_12, -1, 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_10 = t_10.transpose(3, 2)
        t_10 = torch.matmul(t_9, t_10)
        # decoder relative position bias (block 0 holds the shared embedding
        # self.l_5); see the bucketing sketch after this class
        t_9 = torch.arange(t_8, dtype=torch.int64, device=self.device)
        t_9 = t_9[:, None]
        t_8 = torch.arange(t_8, dtype=torch.int64, device=self.device)
        t_8 = t_8[None, :]
        t_9 = t_8 - t_9
        t_8 = torch.zeros_like(t_9, device=self.device)
        t_8 = torch.min(t_9, t_8)
        t_8 = -t_8
        t_9 = t_8.float()
        t_13 = t_8 < 16
        t_9 = t_9 / 16
        t_9 = torch.log(t_9)
        t_14 = math.log(8.0)
        t_14 = t_9 / t_14
        t_14 = t_14 * 16
        t_14 = t_14.to(torch.int64)
        t_14 = 16 + t_14
        t_9 = torch.full_like(t_14, 31, device=self.device)
        t_9 = torch.min(t_14, t_9)
        t_9 = torch.where(t_13, t_8, t_9)
        t_9 = 0 + t_9
        t_9 = t_9.to(self.device)
        t_9 = self.l_5(t_9)
        t_9 = t_9.permute([2, 0, 1])
        t_9 = t_9.unsqueeze(0)
        t_9 = t_9 + x2
        t_10 += t_9
        t_8 = t_10.float()
        t_8 = torch.nn.functional.softmax(t_8, dim=-1)
        t_10 = t_8.type_as(t_10)
        t_10 = torch.nn.functional.dropout(t_10, p=0.1, training=self.training)
        t_11 = torch.matmul(t_10, t_11)
        t_11 = t_11.transpose(1, 2)
        t_11 = t_11.contiguous()
        t_12 = t_11.view(t_12, -1, 4096)
        t_12 = self.l_6(t_12)
        t_11 = self.l_7(t_12)
        t_11 = t_6 + t_11
        t_9 = (t_12, None, t_9)
        t_12 = t_9[0]
        t_11 = (t_11,)
        t_9 = t_9[1:]
        t_9 = t_11 + t_9
        # block 0: cross-attention
        t_11 = t_9[:2]
        t_6 = t_11[0]
        t_10 = self.l_8(t_6)
        t_11 = t_11[1]
        t_9 = t_9[2:]
        t_8 = self.l_9(t_10)
        t_10 = t_10.shape
        t_10 = t_10[:2]
        t_13 = t_10[0]
        t_10 = t_10[1]
        t_14 = x0.shape
        t_14 = t_14[1]
        t_8 = t_8.view(t_13, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_0 = t_0.view(t_13, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_13, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_8, t_0)
        # cross-attention has no position bias; add the extended mask on top
        # of a zero bias of shape (1, 32, tgt_len, src_len)
        t_8 = t_0.dtype
        t_14 = (1, 32, t_10, t_14)
        t_8 = torch.zeros(t_14, device=self.device, dtype=t_8)
        t_7 = t_8 + t_7
        t_0 += t_7
        t_8 = t_0.float()
        t_8 = torch.nn.functional.softmax(t_8, dim=-1)
        t_0 = t_8.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_13 = t_1.view(t_13, -1, 4096)
        t_13 = self.l_12(t_13)
        t_1 = self.l_13(t_13)
        t_1 = t_6 + t_1
        t_7 = (t_13, None, t_7)
        t_13 = t_7[0]
        t_1 = (t_1,)
        t_7 = t_7[1:]
        t_7 = t_1 + t_7
        # block 0: feed-forward
        t_1 = t_7[0]
        t_6 = self.l_14(t_1)
        t_7 = t_7[2:]
        t_7 = t_9 + t_7
        t_6 = self.l_15(t_6)
        t_6 = torch.nn.functional.relu(t_6)
        t_6 = self.l_16(t_6)
        t_6 = self.l_17(t_6)
        t_6 = self.l_18(t_6)
        t_6 = t_1 + t_6
        t_11 = (t_6, t_11)
        t_7 = t_11 + t_7
        # block 1: self-attention
        t_11 = t_7[:2]
        t_11 = t_11[0]
        t_6 = self.l_19(t_11)
        t_1 = t_7[2]
        t_7 = t_7[3]
        t_9 = self.l_20(t_6)
        t_0 = self.l_21(t_6)
        t_8 = self.l_22(t_6)
        t_6 = t_6.shape
        t_6 = t_6[:2]
        t_6 = t_6[0]
        t_9 = t_9.view(t_6, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_0 = t_0.view(t_6, -1, 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_8 = t_8.view(t_6, -1, 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_9, t_0)
        t_0 += t_1
        t_9 = t_0.float()
        t_9 = torch.nn.functional.softmax(t_9, dim=-1)
        t_0 = t_9.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training)
        t_8 = torch.matmul(t_0, t_8)
        t_8 = t_8.transpose(1, 2)
        t_8 = t_8.contiguous()
        t_6 = t_8.view(t_6, -1, 4096)
        t_6 = self.l_23(t_6)
        t_8 = self.l_24(t_6)
        t_8 = t_11 + t_8
        t_1 = (t_6, None, t_1)
        t_6 = t_1[0]
        t_8 = (t_8,)
        t_1 = t_1[1:]
        t_1 = t_8 + t_1
        # block 1: cross-attention
        t_8 = t_1[:2]
        t_11 = t_8[0]
        t_0 = self.l_25(t_11)
        t_8 = t_8[1]
        t_1 = t_1[2:]
        t_9 = self.l_26(t_0)
        t_0 = t_0.shape
        t_0 = t_0[:2]
        t_0 = t_0[0]
        t_9 = t_9.view(t_0, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_2 = t_2.view(t_0, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_9, t_2)
        t_2 += t_7
        t_9 = t_2.float()
        t_9 = torch.nn.functional.softmax(t_9, dim=-1)
        t_2 = t_9.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, -1, 4096)
        t_0 = self.l_29(t_0)
        t_3 = self.l_30(t_0)
        t_3 = t_11 + t_3
        t_7 = (t_0, None, t_7)
        t_0 = t_7[0]
        t_3 = (t_3,)
        t_7 = t_7[1:]
        t_7 = t_3 + t_7
        # block 1: feed-forward
        t_3 = t_7[0]
        t_11 = self.l_31(t_3)
        t_7 = t_7[2:]
        t_7 = t_1 + t_7
        t_11 = self.l_32(t_11)
        t_11 = torch.nn.functional.relu(t_11)
        t_11 = self.l_33(t_11)
        t_11 = self.l_34(t_11)
        t_11 = self.l_35(t_11)
        t_11 = t_3 + t_11
        t_8 = (t_11, t_8)
        t_7 = t_8 + t_7
        # block 2: self-attention
        t_8 = t_7[:2]
        t_8 = t_8[0]
        t_11 = self.l_36(t_8)
        t_3 = t_7[2]
        t_7 = t_7[3]
        t_1 = self.l_37(t_11)
        t_2 = self.l_38(t_11)
        t_9 = self.l_39(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_1 = t_1.view(t_11, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_11, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_9 = t_9.view(t_11, -1, 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training)
        t_9 = torch.matmul(t_2, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_11 = t_9.view(t_11, -1, 4096)
        t_11 = self.l_40(t_11)
        t_9 = self.l_41(t_11)
        t_9 = t_8 + t_9
        t_3 = (t_11, None, t_3)
        t_11 = t_3[0]
        t_9 = (t_9,)
        t_3 = t_3[1:]
        t_3 = t_9 + t_3
        # block 2: cross-attention
        t_9 = t_3[:2]
        t_8 = t_9[0]
        t_2 = self.l_42(t_8)
        t_9 = t_9[1]
        t_3 = t_3[2:]
        t_1 = self.l_43(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_7
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_46(t_2)
        t_5 = self.l_47(t_2)
        t_5 = t_8 + t_5
        t_7 = (t_2, None, t_7)
        t_2 = t_7[0]
        t_5 = (t_5,)
        t_7 = t_7[1:]
        t_7 = t_5 + t_7
        # block 2: feed-forward
        t_5 = t_7[0]
        t_8 = self.l_48(t_5)
        t_7 = t_7[2:]
        t_7 = t_3 + t_7
        t_8 = self.l_49(t_8)
        t_8 = torch.nn.functional.relu(t_8)
        t_8 = self.l_50(t_8)
        t_8 = self.l_51(t_8)
        t_8 = self.l_52(t_8)
        t_8 = t_5 + t_8
        t_9 = (t_8, t_9)
        t_7 = t_9 + t_7
        t_9 = t_7[:2]
        t_9 = t_9[0]
        t_8 = t_7[2]
        t_7 = t_7[3]
        return list(flatten((x0, t_9, t_8, t_7)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
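# Partition8 inlines the decoder's relative-position bucketing (the arange /
# log arithmetic feeding self.l_5, the relative_attention_bias embedding).
# The same computation restated as a standalone function for reference; the
# constants match the inlined code: 32 buckets, 16 exact buckets, max
# distance 128 (hence log(8.0) == log(128 / 16)), and only past positions
# are kept since the decoder attends unidirectionally:
def _relative_position_bucket_sketch(query_len, key_len, device):
    context = torch.arange(query_len, dtype=torch.int64, device=device)[:, None]
    memory = torch.arange(key_len, dtype=torch.int64, device=device)[None, :]
    relative = memory - context
    # keep only distances into the past (non-negative after negation)
    distance = -torch.min(relative, torch.zeros_like(relative))
    is_small = distance < 16
    # logarithmic buckets for distances in [16, 128), capped at bucket 31
    large = 16 + (torch.log(distance.float() / 16) / math.log(8.0) * 16).to(torch.int64)
    large = torch.min(large, torch.full_like(large, 31))
    return torch.where(is_small, distance, large)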
class Partition9(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:9'):
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.3.layer.0.layer_norm', 'l_1': 'decoder.block.3.layer.0.SelfAttention.q', 'l_2': 'decoder.block.3.layer.0.SelfAttention.k', 'l_3': 'decoder.block.3.layer.0.SelfAttention.v', 'l_4': 'decoder.block.3.layer.0.SelfAttention.o', 'l_5': 'decoder.block.3.layer.0.dropout', 'l_6': 'decoder.block.3.layer.1.layer_norm', 'l_7': 'decoder.block.3.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.3.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.3.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.3.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.3.layer.1.dropout', 'l_12': 'decoder.block.3.layer.2.layer_norm', 'l_13': 'decoder.block.3.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.3.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.3.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.3.layer.2.dropout',
            'l_17': 'decoder.block.4.layer.0.layer_norm', 'l_18': 'decoder.block.4.layer.0.SelfAttention.q', 'l_19': 'decoder.block.4.layer.0.SelfAttention.k', 'l_20': 'decoder.block.4.layer.0.SelfAttention.v', 'l_21': 'decoder.block.4.layer.0.SelfAttention.o', 'l_22': 'decoder.block.4.layer.0.dropout', 'l_23': 'decoder.block.4.layer.1.layer_norm', 'l_24': 'decoder.block.4.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.4.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.4.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.4.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.4.layer.1.dropout', 'l_29': 'decoder.block.4.layer.2.layer_norm', 'l_30': 'decoder.block.4.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.4.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.4.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.4.layer.2.dropout',
            'l_34': 'decoder.block.5.layer.0.layer_norm', 'l_35': 'decoder.block.5.layer.0.SelfAttention.q', 'l_36': 'decoder.block.5.layer.0.SelfAttention.k', 'l_37': 'decoder.block.5.layer.0.SelfAttention.v', 'l_38': 'decoder.block.5.layer.0.SelfAttention.o', 'l_39': 'decoder.block.5.layer.0.dropout', 'l_40': 'decoder.block.5.layer.1.layer_norm', 'l_41': 'decoder.block.5.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.5.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.5.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.5.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.5.layer.1.dropout', 'l_46': 'decoder.block.5.layer.2.layer_norm', 'l_47': 'decoder.block.5.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.5.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.5.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.5.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        # x0: encoder output, x1: decoder hidden states, x2/x3: biases added to
        # the self-/cross-attention scores of every block in this stage.
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Cross-attention key/value projections of the encoder output for
        # blocks 3, 4 and 5, computed up front.
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        # Block 3 self-attention (l_0..l_5).
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        # Scores are upcast to float32 for the softmax, then cast back.
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        # Block 3 cross-attention (l_6..l_11).
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        # Block 3 feed-forward (l_12..l_16).
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        # Block 4 self-attention (l_17..l_22).
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        # Block 4 cross-attention (l_23..l_28).
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        # Block 4 feed-forward (l_29..l_33).
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        # Block 5 self-attention (l_34..l_39).
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        # Block 5 cross-attention (l_40..l_45).
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        # Block 5 feed-forward (l_46..l_50).
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        # The encoder output x0 is passed through unchanged for the next stage.
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
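
# The reshape pattern that recurs in every generated forward() above splits the
# 4096-wide hidden state into 32 attention heads of 128 dims each. A standalone
# equivalent is sketched below for readability only; the generated code keeps
# the reshape inlined and nothing in this file calls this helper.
def _split_heads(x, batch_size, n_heads=32, d_head=128):
    # (batch, seq_len, n_heads * d_head) -> (batch, n_heads, seq_len, d_head)
    return x.view(batch_size, -1, n_heads, d_head).transpose(1, 2)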
class Partition10(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:10'):
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.6.layer.0.layer_norm', 'l_1': 'decoder.block.6.layer.0.SelfAttention.q', 'l_2': 'decoder.block.6.layer.0.SelfAttention.k', 'l_3': 'decoder.block.6.layer.0.SelfAttention.v', 'l_4': 'decoder.block.6.layer.0.SelfAttention.o', 'l_5': 'decoder.block.6.layer.0.dropout', 'l_6': 'decoder.block.6.layer.1.layer_norm', 'l_7': 'decoder.block.6.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.6.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.6.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.6.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.6.layer.1.dropout', 'l_12': 'decoder.block.6.layer.2.layer_norm', 'l_13': 'decoder.block.6.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.6.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.6.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.6.layer.2.dropout',
            'l_17': 'decoder.block.7.layer.0.layer_norm', 'l_18': 'decoder.block.7.layer.0.SelfAttention.q', 'l_19': 'decoder.block.7.layer.0.SelfAttention.k', 'l_20': 'decoder.block.7.layer.0.SelfAttention.v', 'l_21': 'decoder.block.7.layer.0.SelfAttention.o', 'l_22': 'decoder.block.7.layer.0.dropout', 'l_23': 'decoder.block.7.layer.1.layer_norm', 'l_24': 'decoder.block.7.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.7.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.7.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.7.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.7.layer.1.dropout', 'l_29': 'decoder.block.7.layer.2.layer_norm', 'l_30': 'decoder.block.7.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.7.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.7.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.7.layer.2.dropout',
            'l_34': 'decoder.block.8.layer.0.layer_norm', 'l_35': 'decoder.block.8.layer.0.SelfAttention.q', 'l_36': 'decoder.block.8.layer.0.SelfAttention.k', 'l_37': 'decoder.block.8.layer.0.SelfAttention.v', 'l_38': 'decoder.block.8.layer.0.SelfAttention.o', 'l_39': 'decoder.block.8.layer.0.dropout', 'l_40': 'decoder.block.8.layer.1.layer_norm', 'l_41': 'decoder.block.8.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.8.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.8.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.8.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.8.layer.1.dropout', 'l_46': 'decoder.block.8.layer.2.layer_norm', 'l_47': 'decoder.block.8.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.8.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.8.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.8.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
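
# Illustrative usage of a generated stage (hypothetical driver code; the real
# `layers`/`tensors` dicts come from the tracing step that emitted this file):
#
#     stage = Partition10(layers, tensors, device='cuda:10')
#     outs = stage(enc_states, dec_states, self_attn_bias, cross_attn_bias)
#
# The four positional inputs match input_structure == [1, 1, 1, 1]; the return
# value is a flat list so it can be forwarded to the next stage unchanged.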
class Partition11(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:11'):
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.9.layer.0.layer_norm', 'l_1': 'decoder.block.9.layer.0.SelfAttention.q', 'l_2': 'decoder.block.9.layer.0.SelfAttention.k', 'l_3': 'decoder.block.9.layer.0.SelfAttention.v', 'l_4': 'decoder.block.9.layer.0.SelfAttention.o', 'l_5': 'decoder.block.9.layer.0.dropout', 'l_6': 'decoder.block.9.layer.1.layer_norm', 'l_7': 'decoder.block.9.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.9.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.9.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.9.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.9.layer.1.dropout', 'l_12': 'decoder.block.9.layer.2.layer_norm', 'l_13': 'decoder.block.9.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.9.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.9.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.9.layer.2.dropout',
            'l_17': 'decoder.block.10.layer.0.layer_norm', 'l_18': 'decoder.block.10.layer.0.SelfAttention.q', 'l_19': 'decoder.block.10.layer.0.SelfAttention.k', 'l_20': 'decoder.block.10.layer.0.SelfAttention.v', 'l_21': 'decoder.block.10.layer.0.SelfAttention.o', 'l_22': 'decoder.block.10.layer.0.dropout', 'l_23': 'decoder.block.10.layer.1.layer_norm', 'l_24': 'decoder.block.10.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.10.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.10.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.10.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.10.layer.1.dropout', 'l_29': 'decoder.block.10.layer.2.layer_norm', 'l_30': 'decoder.block.10.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.10.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.10.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.10.layer.2.dropout',
            'l_34': 'decoder.block.11.layer.0.layer_norm', 'l_35': 'decoder.block.11.layer.0.SelfAttention.q', 'l_36': 'decoder.block.11.layer.0.SelfAttention.k', 'l_37': 'decoder.block.11.layer.0.SelfAttention.v', 'l_38': 'decoder.block.11.layer.0.SelfAttention.o', 'l_39': 'decoder.block.11.layer.0.dropout', 'l_40': 'decoder.block.11.layer.1.layer_norm', 'l_41': 'decoder.block.11.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.11.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.11.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.11.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.11.layer.1.dropout', 'l_46': 'decoder.block.11.layer.2.layer_norm', 'l_47': 'decoder.block.11.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.11.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.11.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.11.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
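
# Note on the attention arithmetic in the generated forward() methods: the raw
# scores are upcast to float32 before softmax (`.float()`) and cast back with
# `.type_as(...)`, which keeps the softmax numerically stable when a stage
# runs in reduced precision.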
class Partition12(nn.Module):
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]',
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:12'):
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1]
        self.lookup = {
            'l_0': 'decoder.block.12.layer.0.layer_norm', 'l_1': 'decoder.block.12.layer.0.SelfAttention.q', 'l_2': 'decoder.block.12.layer.0.SelfAttention.k', 'l_3': 'decoder.block.12.layer.0.SelfAttention.v', 'l_4': 'decoder.block.12.layer.0.SelfAttention.o', 'l_5': 'decoder.block.12.layer.0.dropout', 'l_6': 'decoder.block.12.layer.1.layer_norm', 'l_7': 'decoder.block.12.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.12.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.12.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.12.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.12.layer.1.dropout', 'l_12': 'decoder.block.12.layer.2.layer_norm', 'l_13': 'decoder.block.12.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.12.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.12.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.12.layer.2.dropout',
            'l_17': 'decoder.block.13.layer.0.layer_norm', 'l_18': 'decoder.block.13.layer.0.SelfAttention.q', 'l_19': 'decoder.block.13.layer.0.SelfAttention.k', 'l_20': 'decoder.block.13.layer.0.SelfAttention.v', 'l_21': 'decoder.block.13.layer.0.SelfAttention.o', 'l_22': 'decoder.block.13.layer.0.dropout', 'l_23': 'decoder.block.13.layer.1.layer_norm', 'l_24': 'decoder.block.13.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.13.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.13.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.13.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.13.layer.1.dropout', 'l_29': 'decoder.block.13.layer.2.layer_norm', 'l_30': 'decoder.block.13.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.13.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.13.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.13.layer.2.dropout',
            'l_34': 'decoder.block.14.layer.0.layer_norm', 'l_35': 'decoder.block.14.layer.0.SelfAttention.q', 'l_36': 'decoder.block.14.layer.0.SelfAttention.k', 'l_37': 'decoder.block.14.layer.0.SelfAttention.v', 'l_38': 'decoder.block.14.layer.0.SelfAttention.o', 'l_39': 'decoder.block.14.layer.0.dropout', 'l_40': 'decoder.block.14.layer.1.layer_norm', 'l_41': 'decoder.block.14.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.14.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.14.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.14.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.14.layer.1.dropout', 'l_46': 'decoder.block.14.layer.2.layer_norm', 'l_47': 'decoder.block.14.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.14.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.14.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.14.layer.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        t_0 = self.l_8(x0)
        t_1 = self.l_9(x0)
        t_2 = self.l_25(x0)
        t_3 = self.l_26(x0)
        t_4 = self.l_42(x0)
        t_5 = self.l_43(x0)
        t_6 = self.l_0(x1)
        t_7 = self.l_1(t_6)
        t_8 = self.l_2(t_6)
        t_9 = self.l_3(t_6)
        t_6 = t_6.shape
        t_6 = t_6[slice(None, 2, None)]
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_6, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x2
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False)
        t_9 = torch.matmul(t_8, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_6 = t_9.view(t_6, (- 1), 4096)
        t_6 = self.l_4(t_6)
        t_9 = self.l_5(t_6)
        t_9 = (x1 + t_9)
        t_6 = (t_6, None, x2)
        t_8 = t_6[0]
        t_9 = (t_9,)
        t_6 = t_6[slice(1, None, None)]
        t_6 = (t_9 + t_6)
        t_9 = t_6[slice(None, 2, None)]
        t_7 = t_9[0]
        t_10 = self.l_6(t_7)
        t_9 = t_9[1]
        t_6 = t_6[slice(2, None, None)]
        t_11 = self.l_7(t_10)
        t_10 = t_10.shape
        t_10 = t_10[slice(None, 2, None)]
        t_10 = t_10[0]
        t_11 = t_11.view(t_10, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_0 = t_0.view(t_10, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_10, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_11, t_0)
        t_0 += x3
        t_11 = t_0.float()
        t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_11.type_as(t_0)
        t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_10 = t_1.view(t_10, (- 1), 4096)
        t_10 = self.l_10(t_10)
        t_1 = self.l_11(t_10)
        t_1 = (t_7 + t_1)
        t_10 = (t_10, None, x3)
        t_7 = t_10[0]
        t_1 = (t_1,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_1 + t_10)
        t_1 = t_10[0]
        t_0 = self.l_12(t_1)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_6 + t_10)
        t_0 = self.l_13(t_0)
        t_0 = torch.nn.functional.relu(t_0, inplace=False)
        t_0 = self.l_14(t_0)
        t_0 = self.l_15(t_0)
        t_0 = self.l_16(t_0)
        t_0 = (t_1 + t_0)
        t_9 = (t_0, t_9)
        t_10 = (t_9 + t_10)
        t_9 = t_10[slice(None, 2, None)]
        t_9 = t_9[0]
        t_0 = self.l_17(t_9)
        t_1 = t_10[2]
        t_10 = t_10[3]
        t_6 = self.l_18(t_0)
        t_11 = self.l_19(t_0)
        t_12 = self.l_20(t_0)
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        t_6 = t_6.view(t_0, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_11 = t_11.view(t_0, (- 1), 32, 128)
        t_11 = t_11.transpose(1, 2)
        t_12 = t_12.view(t_0, (- 1), 32, 128)
        t_12 = t_12.transpose(1, 2)
        t_11 = t_11.transpose(3, 2)
        t_11 = torch.matmul(t_6, t_11)
        t_11 += t_1
        t_6 = t_11.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_11 = t_6.type_as(t_11)
        t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, (- 1), 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = (t_9 + t_12)
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[slice(1, None, None)]
        t_1 = (t_12 + t_1)
        t_12 = t_1[slice(None, 2, None)]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[slice(2, None, None)]
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[slice(None, 2, None)]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += t_10
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, (- 1), 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = (t_9 + t_3)
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_3 + t_10)
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_1 + t_10)
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = (t_3 + t_9)
        t_12 = (t_9, t_12)
        t_10 = (t_12 + t_10)
        t_12 = t_10[slice(None, 2, None)]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[slice(None, 2, None)]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        t_2 += t_3
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, (- 1), 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = (t_12 + t_6)
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[slice(1, None, None)]
        t_3 = (t_6 + t_3)
        t_6 = t_3[slice(None, 2, None)]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[slice(2, None, None)]
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += t_10
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, (- 1), 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = (t_12 + t_5)
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[slice(1, None, None)]
        t_10 = (t_5 + t_10)
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[slice(2, None, None)]
        t_10 = (t_3 + t_10)
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = (t_5 + t_12)
        t_6 = (t_12, t_6)
        t_10 = (t_6 + t_10)
        t_6 = t_10[slice(None, 2, None)]
        t_6 = t_6[0]
        t_12 = t_10[2]
        t_10 = t_10[3]
        return list(flatten((x0, t_6, t_12, t_10)))

    def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
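# NOTE (editorial): Partition13-Partition15 below repeat the generated pattern of
# Partition12 above, shifted to later decoder blocks and devices. Each forward
# first runs the cross-attention key/value projections (l_8/l_9, l_25/l_26,
# l_42/l_43) against the encoder output x0, so the encoder-side work for all
# three blocks in the stage is done up front, then walks
# self-attention -> cross-attention -> feed-forward per T5 block, threading the
# self- and cross-attention position biases (x2, x3) through the traced tuple
# packing. A minimal sketch of the stage I/O contract, assuming the
# flatten/unflatten helpers defined earlier in this file:
#
#   args = list(flatten((x0, x1, x2, x3)))   # producer side
#   outs = stage(*args)                      # list(flatten((x0, h, b_self, b_cross)))
#   x0, h, b_self, b_cross = unflatten(outs, stage.input_structure)
#
# input_structure == [1, 1, 1, 1] marks four flat tensors: encoder states,
# decoder hidden states, and the two position-bias tensors.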
class Partition13(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:13'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.15.layer.0.layer_norm', 'l_1': 'decoder.block.15.layer.0.SelfAttention.q', 'l_2': 'decoder.block.15.layer.0.SelfAttention.k', 'l_3': 'decoder.block.15.layer.0.SelfAttention.v', 'l_4': 'decoder.block.15.layer.0.SelfAttention.o', 'l_5': 'decoder.block.15.layer.0.dropout', 'l_6': 'decoder.block.15.layer.1.layer_norm', 'l_7': 'decoder.block.15.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.15.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.15.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.15.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.15.layer.1.dropout', 'l_12': 'decoder.block.15.layer.2.layer_norm', 'l_13': 'decoder.block.15.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.15.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.15.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.15.layer.2.dropout', 'l_17': 'decoder.block.16.layer.0.layer_norm', 'l_18': 'decoder.block.16.layer.0.SelfAttention.q', 'l_19': 'decoder.block.16.layer.0.SelfAttention.k', 'l_20': 'decoder.block.16.layer.0.SelfAttention.v', 'l_21': 'decoder.block.16.layer.0.SelfAttention.o', 'l_22': 'decoder.block.16.layer.0.dropout', 'l_23': 'decoder.block.16.layer.1.layer_norm', 'l_24': 'decoder.block.16.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.16.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.16.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.16.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.16.layer.1.dropout', 'l_29': 'decoder.block.16.layer.2.layer_norm', 'l_30': 'decoder.block.16.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.16.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.16.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.16.layer.2.dropout', 'l_34': 'decoder.block.17.layer.0.layer_norm', 'l_35': 'decoder.block.17.layer.0.SelfAttention.q', 'l_36': 'decoder.block.17.layer.0.SelfAttention.k', 'l_37': 'decoder.block.17.layer.0.SelfAttention.v', 'l_38': 'decoder.block.17.layer.0.SelfAttention.o', 'l_39': 'decoder.block.17.layer.0.dropout', 'l_40': 'decoder.block.17.layer.1.layer_norm', 'l_41': 'decoder.block.17.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.17.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.17.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.17.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.17.layer.1.dropout', 'l_46': 
'decoder.block.17.layer.2.layer_norm', 'l_47': 'decoder.block.17.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.17.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.17.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.17.layer.2.dropout'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_9(x0) t_2 = self.l_25(x0) t_3 = self.l_26(x0) t_4 = self.l_42(x0) t_5 = self.l_43(x0) t_6 = self.l_0(x1) t_7 = self.l_1(t_6) t_8 = self.l_2(t_6) t_9 = self.l_3(t_6) t_6 = t_6.shape t_6 = t_6[slice(None, 2, None)] t_6 = t_6[0] t_7 = t_7.view(t_6, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_6, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_9 = t_9.view(t_6, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = torch.matmul(t_7, t_8) t_8 += x2 t_7 = t_8.float() t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_7.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_9 = torch.matmul(t_8, t_9) t_9 = t_9.transpose(1, 2) t_9 = t_9.contiguous() t_6 = t_9.view(t_6, (- 1), 4096) t_6 = self.l_4(t_6) t_9 = self.l_5(t_6) t_9 = (x1 + t_9) t_6 = (t_6, None, x2) t_8 = t_6[0] t_9 = (t_9,) t_6 = t_6[slice(1, None, None)] t_6 = (t_9 + t_6) t_9 = t_6[slice(None, 2, None)] t_7 = t_9[0] t_10 = self.l_6(t_7) t_9 = t_9[1] t_6 = t_6[slice(2, None, None)] t_11 = self.l_7(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x3 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_10(t_10) t_1 = self.l_11(t_10) t_1 = (t_7 + t_1) t_10 = (t_10, None, x3) t_7 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_12(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_6 + t_10) t_0 = self.l_13(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_9 = (t_0, t_9) t_10 = (t_9 + t_10) t_9 = t_10[slice(None, 2, None)] t_9 = t_9[0] t_0 = self.l_17(t_9) t_1 = t_10[2] t_10 = t_10[3] t_6 = self.l_18(t_0) t_11 = self.l_19(t_0) t_12 = self.l_20(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_6 = t_6.view(t_0, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_6, t_11) t_11 += t_1 t_6 = t_11.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_6.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_11, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_0 = t_12.view(t_0, (- 1), 4096) t_0 = self.l_21(t_0) t_12 = self.l_22(t_0) t_12 = (t_9 + t_12) t_1 = (t_0, None, t_1) t_0 = t_1[0] t_12 = (t_12,) t_1 = t_1[slice(1, None, None)] t_1 = (t_12 + t_1) t_12 = 
t_1[slice(None, 2, None)] t_9 = t_12[0] t_11 = self.l_23(t_9) t_12 = t_12[1] t_1 = t_1[slice(2, None, None)] t_6 = self.l_24(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_6 = t_6.view(t_11, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.view(t_11, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_11, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_6, t_2) t_2 += t_10 t_6 = t_2.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_6.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_11 = t_3.view(t_11, (- 1), 4096) t_11 = self.l_27(t_11) t_3 = self.l_28(t_11) t_3 = (t_9 + t_3) t_10 = (t_11, None, t_10) t_11 = t_10[0] t_3 = (t_3,) t_10 = t_10[slice(1, None, None)] t_10 = (t_3 + t_10) t_3 = t_10[0] t_9 = self.l_29(t_3) t_10 = t_10[slice(2, None, None)] t_10 = (t_1 + t_10) t_9 = self.l_30(t_9) t_9 = torch.nn.functional.relu(t_9, inplace=False) t_9 = self.l_31(t_9) t_9 = self.l_32(t_9) t_9 = self.l_33(t_9) t_9 = (t_3 + t_9) t_12 = (t_9, t_12) t_10 = (t_12 + t_10) t_12 = t_10[slice(None, 2, None)] t_12 = t_12[0] t_9 = self.l_34(t_12) t_3 = t_10[2] t_10 = t_10[3] t_1 = self.l_35(t_9) t_2 = self.l_36(t_9) t_6 = self.l_37(t_9) t_9 = t_9.shape t_9 = t_9[slice(None, 2, None)] t_9 = t_9[0] t_1 = t_1.view(t_9, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_9, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_6 = t_6.view(t_9, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += t_3 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_2, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_9 = t_6.view(t_9, (- 1), 4096) t_9 = self.l_38(t_9) t_6 = self.l_39(t_9) t_6 = (t_12 + t_6) t_3 = (t_9, None, t_3) t_9 = t_3[0] t_6 = (t_6,) t_3 = t_3[slice(1, None, None)] t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_12 = t_6[0] t_2 = self.l_40(t_12) t_6 = t_6[1] t_3 = t_3[slice(2, None, None)] t_1 = self.l_41(t_2) t_2 = t_2.shape t_2 = t_2[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_1.view(t_2, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_4 = t_4.view(t_2, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_2, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_4 = t_4.transpose(3, 2) t_4 = torch.matmul(t_1, t_4) t_4 += t_10 t_1 = t_4.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_4 = t_1.type_as(t_4) t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_2 = t_5.view(t_2, (- 1), 4096) t_2 = self.l_44(t_2) t_5 = self.l_45(t_2) t_5 = (t_12 + t_5) t_10 = (t_2, None, t_10) t_2 = t_10[0] t_5 = (t_5,) t_10 = t_10[slice(1, None, None)] t_10 = (t_5 + t_10) t_5 = t_10[0] t_12 = self.l_46(t_5) t_10 = t_10[slice(2, None, None)] t_10 = (t_3 + t_10) t_12 = self.l_47(t_12) t_12 = torch.nn.functional.relu(t_12, inplace=False) t_12 = self.l_48(t_12) t_12 = self.l_49(t_12) t_12 = self.l_50(t_12) t_12 = (t_5 + t_12) t_6 = (t_12, t_6) t_10 = (t_6 + t_10) t_6 = t_10[slice(None, 2, None)] t_6 = t_6[0] t_12 = t_10[2] t_10 = t_10[3] return list(flatten((x0, t_6, t_12, t_10))) def state_dict(self, *args, **kwargs): 
return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition14(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:14'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.18.layer.0.layer_norm', 'l_1': 'decoder.block.18.layer.0.SelfAttention.q', 'l_2': 'decoder.block.18.layer.0.SelfAttention.k', 'l_3': 'decoder.block.18.layer.0.SelfAttention.v', 'l_4': 'decoder.block.18.layer.0.SelfAttention.o', 'l_5': 'decoder.block.18.layer.0.dropout', 'l_6': 'decoder.block.18.layer.1.layer_norm', 'l_7': 'decoder.block.18.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.18.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.18.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.18.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.18.layer.1.dropout', 'l_12': 'decoder.block.18.layer.2.layer_norm', 'l_13': 'decoder.block.18.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.18.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.18.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.18.layer.2.dropout', 'l_17': 'decoder.block.19.layer.0.layer_norm', 'l_18': 'decoder.block.19.layer.0.SelfAttention.q', 'l_19': 'decoder.block.19.layer.0.SelfAttention.k', 'l_20': 'decoder.block.19.layer.0.SelfAttention.v', 'l_21': 'decoder.block.19.layer.0.SelfAttention.o', 'l_22': 'decoder.block.19.layer.0.dropout', 'l_23': 'decoder.block.19.layer.1.layer_norm', 'l_24': 'decoder.block.19.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.19.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.19.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.19.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.19.layer.1.dropout', 'l_29': 'decoder.block.19.layer.2.layer_norm', 'l_30': 'decoder.block.19.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.19.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.19.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.19.layer.2.dropout', 'l_34': 'decoder.block.20.layer.0.layer_norm', 'l_35': 'decoder.block.20.layer.0.SelfAttention.q', 'l_36': 'decoder.block.20.layer.0.SelfAttention.k', 'l_37': 'decoder.block.20.layer.0.SelfAttention.v', 'l_38': 'decoder.block.20.layer.0.SelfAttention.o', 'l_39': 'decoder.block.20.layer.0.dropout', 'l_40': 'decoder.block.20.layer.1.layer_norm', 'l_41': 'decoder.block.20.layer.1.EncDecAttention.q', 'l_42': 'decoder.block.20.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.20.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.20.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.20.layer.1.dropout', 'l_46': 
'decoder.block.20.layer.2.layer_norm', 'l_47': 'decoder.block.20.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.20.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.20.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.20.layer.2.dropout'} self.to(self.device) def forward(self, *args): (x0, x1, x2, x3) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_9(x0) t_2 = self.l_25(x0) t_3 = self.l_26(x0) t_4 = self.l_42(x0) t_5 = self.l_43(x0) t_6 = self.l_0(x1) t_7 = self.l_1(t_6) t_8 = self.l_2(t_6) t_9 = self.l_3(t_6) t_6 = t_6.shape t_6 = t_6[slice(None, 2, None)] t_6 = t_6[0] t_7 = t_7.view(t_6, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_6, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_9 = t_9.view(t_6, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = torch.matmul(t_7, t_8) t_8 += x2 t_7 = t_8.float() t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_7.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_9 = torch.matmul(t_8, t_9) t_9 = t_9.transpose(1, 2) t_9 = t_9.contiguous() t_6 = t_9.view(t_6, (- 1), 4096) t_6 = self.l_4(t_6) t_9 = self.l_5(t_6) t_9 = (x1 + t_9) t_6 = (t_6, None, x2) t_8 = t_6[0] t_9 = (t_9,) t_6 = t_6[slice(1, None, None)] t_6 = (t_9 + t_6) t_9 = t_6[slice(None, 2, None)] t_7 = t_9[0] t_10 = self.l_6(t_7) t_9 = t_9[1] t_6 = t_6[slice(2, None, None)] t_11 = self.l_7(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x3 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_10(t_10) t_1 = self.l_11(t_10) t_1 = (t_7 + t_1) t_10 = (t_10, None, x3) t_7 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_12(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_6 + t_10) t_0 = self.l_13(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_9 = (t_0, t_9) t_10 = (t_9 + t_10) t_9 = t_10[slice(None, 2, None)] t_9 = t_9[0] t_0 = self.l_17(t_9) t_1 = t_10[2] t_10 = t_10[3] t_6 = self.l_18(t_0) t_11 = self.l_19(t_0) t_12 = self.l_20(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_6 = t_6.view(t_0, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_6, t_11) t_11 += t_1 t_6 = t_11.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_6.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 = torch.matmul(t_11, t_12) t_12 = t_12.transpose(1, 2) t_12 = t_12.contiguous() t_0 = t_12.view(t_0, (- 1), 4096) t_0 = self.l_21(t_0) t_12 = self.l_22(t_0) t_12 = (t_9 + t_12) t_1 = (t_0, None, t_1) t_0 = t_1[0] t_12 = (t_12,) t_1 = t_1[slice(1, None, None)] t_1 = (t_12 + t_1) t_12 = 
t_1[slice(None, 2, None)] t_9 = t_12[0] t_11 = self.l_23(t_9) t_12 = t_12[1] t_1 = t_1[slice(2, None, None)] t_6 = self.l_24(t_11) t_11 = t_11.shape t_11 = t_11[slice(None, 2, None)] t_11 = t_11[0] t_6 = t_6.view(t_11, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.view(t_11, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_3 = t_3.view(t_11, (- 1), 32, 128) t_3 = t_3.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_6, t_2) t_2 += t_10 t_6 = t_2.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_6.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_3 = torch.matmul(t_2, t_3) t_3 = t_3.transpose(1, 2) t_3 = t_3.contiguous() t_11 = t_3.view(t_11, (- 1), 4096) t_11 = self.l_27(t_11) t_3 = self.l_28(t_11) t_3 = (t_9 + t_3) t_10 = (t_11, None, t_10) t_11 = t_10[0] t_3 = (t_3,) t_10 = t_10[slice(1, None, None)] t_10 = (t_3 + t_10) t_3 = t_10[0] t_9 = self.l_29(t_3) t_10 = t_10[slice(2, None, None)] t_10 = (t_1 + t_10) t_9 = self.l_30(t_9) t_9 = torch.nn.functional.relu(t_9, inplace=False) t_9 = self.l_31(t_9) t_9 = self.l_32(t_9) t_9 = self.l_33(t_9) t_9 = (t_3 + t_9) t_12 = (t_9, t_12) t_10 = (t_12 + t_10) t_12 = t_10[slice(None, 2, None)] t_12 = t_12[0] t_9 = self.l_34(t_12) t_3 = t_10[2] t_10 = t_10[3] t_1 = self.l_35(t_9) t_2 = self.l_36(t_9) t_6 = self.l_37(t_9) t_9 = t_9.shape t_9 = t_9[slice(None, 2, None)] t_9 = t_9[0] t_1 = t_1.view(t_9, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_2 = t_2.view(t_9, (- 1), 32, 128) t_2 = t_2.transpose(1, 2) t_6 = t_6.view(t_9, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_2 = t_2.transpose(3, 2) t_2 = torch.matmul(t_1, t_2) t_2 += t_3 t_1 = t_2.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_2 = t_1.type_as(t_2) t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False) t_6 = torch.matmul(t_2, t_6) t_6 = t_6.transpose(1, 2) t_6 = t_6.contiguous() t_9 = t_6.view(t_9, (- 1), 4096) t_9 = self.l_38(t_9) t_6 = self.l_39(t_9) t_6 = (t_12 + t_6) t_3 = (t_9, None, t_3) t_9 = t_3[0] t_6 = (t_6,) t_3 = t_3[slice(1, None, None)] t_3 = (t_6 + t_3) t_6 = t_3[slice(None, 2, None)] t_12 = t_6[0] t_2 = self.l_40(t_12) t_6 = t_6[1] t_3 = t_3[slice(2, None, None)] t_1 = self.l_41(t_2) t_2 = t_2.shape t_2 = t_2[slice(None, 2, None)] t_2 = t_2[0] t_1 = t_1.view(t_2, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_4 = t_4.view(t_2, (- 1), 32, 128) t_4 = t_4.transpose(1, 2) t_5 = t_5.view(t_2, (- 1), 32, 128) t_5 = t_5.transpose(1, 2) t_4 = t_4.transpose(3, 2) t_4 = torch.matmul(t_1, t_4) t_4 += t_10 t_1 = t_4.float() t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None) t_4 = t_1.type_as(t_4) t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False) t_5 = torch.matmul(t_4, t_5) t_5 = t_5.transpose(1, 2) t_5 = t_5.contiguous() t_2 = t_5.view(t_2, (- 1), 4096) t_2 = self.l_44(t_2) t_5 = self.l_45(t_2) t_5 = (t_12 + t_5) t_10 = (t_2, None, t_10) t_2 = t_10[0] t_5 = (t_5,) t_10 = t_10[slice(1, None, None)] t_10 = (t_5 + t_10) t_5 = t_10[0] t_12 = self.l_46(t_5) t_10 = t_10[slice(2, None, None)] t_10 = (t_3 + t_10) t_12 = self.l_47(t_12) t_12 = torch.nn.functional.relu(t_12, inplace=False) t_12 = self.l_48(t_12) t_12 = self.l_49(t_12) t_12 = self.l_50(t_12) t_12 = (t_5 + t_12) t_6 = (t_12, t_6) t_10 = (t_6 + t_10) t_6 = t_10[slice(None, 2, None)] t_6 = t_6[0] t_12 = t_10[2] t_10 = t_10[3] return list(flatten((x0, t_6, t_12, t_10))) def state_dict(self, *args, **kwargs): 
return state_dict(self, *args, **kwargs) def load_state_dict(self, *args, **kwargs): return load_state_dict(self, *args, **kwargs) def named_parameters(self, *args, **kwargs): return named_parameters(self, *args, **kwargs) def named_buffers(self, *args, **kwargs): return named_buffers(self, *args, **kwargs) def cpu(self): return cpu(self) def cuda(self, device=None): return cuda(self, device=device) def to(self, *args, **kwargs): return to(self, *args, **kwargs)
class Partition15(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerCrossAttention[1]/Dropout[dropout]', 
'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]'] TENSORS = [] def __init__(self, layers, tensors, device='cuda:15'): super().__init__() for (idx, layer_scope) in enumerate(self.LAYER_SCOPES): self.add_module(f'l_{idx}', layers[layer_scope]) b = p = 0 for tensor_scope in self.TENSORS: tensor = tensors[tensor_scope] if isinstance(tensor, nn.Parameter): self.register_parameter(f'p_{p}', tensor) p += 1 else: self.register_buffer(f'b_{b}', tensor) b += 1 self.device = torch.device(device) self.input_structure = [1, 1, 1, 1, 1] self.lookup = {'l_0': 'decoder.block.21.layer.0.layer_norm', 'l_1': 'decoder.block.21.layer.0.SelfAttention.q', 'l_2': 'decoder.block.21.layer.0.SelfAttention.k', 'l_3': 'decoder.block.21.layer.0.SelfAttention.v', 'l_4': 'decoder.block.21.layer.0.SelfAttention.o', 'l_5': 'decoder.block.21.layer.0.dropout', 'l_6': 'decoder.block.21.layer.1.layer_norm', 'l_7': 'decoder.block.21.layer.1.EncDecAttention.q', 'l_8': 'decoder.block.21.layer.1.EncDecAttention.k', 'l_9': 'decoder.block.21.layer.1.EncDecAttention.v', 'l_10': 'decoder.block.21.layer.1.EncDecAttention.o', 'l_11': 'decoder.block.21.layer.1.dropout', 'l_12': 'decoder.block.21.layer.2.layer_norm', 'l_13': 'decoder.block.21.layer.2.DenseReluDense.wi', 'l_14': 'decoder.block.21.layer.2.DenseReluDense.dropout', 'l_15': 'decoder.block.21.layer.2.DenseReluDense.wo', 'l_16': 'decoder.block.21.layer.2.dropout', 'l_17': 'decoder.block.22.layer.0.layer_norm', 'l_18': 'decoder.block.22.layer.0.SelfAttention.q', 'l_19': 'decoder.block.22.layer.0.SelfAttention.k', 'l_20': 'decoder.block.22.layer.0.SelfAttention.v', 'l_21': 'decoder.block.22.layer.0.SelfAttention.o', 'l_22': 'decoder.block.22.layer.0.dropout', 'l_23': 'decoder.block.22.layer.1.layer_norm', 'l_24': 'decoder.block.22.layer.1.EncDecAttention.q', 'l_25': 'decoder.block.22.layer.1.EncDecAttention.k', 'l_26': 'decoder.block.22.layer.1.EncDecAttention.v', 'l_27': 'decoder.block.22.layer.1.EncDecAttention.o', 'l_28': 'decoder.block.22.layer.1.dropout', 'l_29': 'decoder.block.22.layer.2.layer_norm', 'l_30': 'decoder.block.22.layer.2.DenseReluDense.wi', 'l_31': 'decoder.block.22.layer.2.DenseReluDense.dropout', 'l_32': 'decoder.block.22.layer.2.DenseReluDense.wo', 'l_33': 'decoder.block.22.layer.2.dropout', 'l_34': 'decoder.block.23.layer.0.layer_norm', 'l_35': 'decoder.block.23.layer.0.SelfAttention.q', 'l_36': 'decoder.block.23.layer.0.SelfAttention.k', 'l_37': 'decoder.block.23.layer.0.SelfAttention.v', 'l_38': 'decoder.block.23.layer.0.SelfAttention.o', 'l_39': 'decoder.block.23.layer.0.dropout', 'l_40': 'decoder.block.23.layer.1.layer_norm', 'l_41': 'decoder.block.23.layer.1.EncDecAttention.q', 'l_42': 
'decoder.block.23.layer.1.EncDecAttention.k', 'l_43': 'decoder.block.23.layer.1.EncDecAttention.v', 'l_44': 'decoder.block.23.layer.1.EncDecAttention.o', 'l_45': 'decoder.block.23.layer.1.dropout', 'l_46': 'decoder.block.23.layer.2.layer_norm', 'l_47': 'decoder.block.23.layer.2.DenseReluDense.wi', 'l_48': 'decoder.block.23.layer.2.DenseReluDense.dropout', 'l_49': 'decoder.block.23.layer.2.DenseReluDense.wo', 'l_50': 'decoder.block.23.layer.2.dropout', 'l_51': 'decoder.final_layer_norm', 'l_52': 'decoder.dropout', 'l_53': 'lm_head'} self.to(self.device) def forward(self, *args): (labels, x0, x1, x2, x3) = unflatten(args, self.input_structure) t_0 = self.l_8(x0) t_1 = self.l_9(x0) t_2 = self.l_25(x0) t_3 = self.l_26(x0) t_4 = self.l_42(x0) t_5 = self.l_43(x0) t_6 = self.l_0(x1) t_7 = self.l_1(t_6) t_8 = self.l_2(t_6) t_9 = self.l_3(t_6) t_6 = t_6.shape t_6 = t_6[slice(None, 2, None)] t_6 = t_6[0] t_7 = t_7.view(t_6, (- 1), 32, 128) t_7 = t_7.transpose(1, 2) t_8 = t_8.view(t_6, (- 1), 32, 128) t_8 = t_8.transpose(1, 2) t_9 = t_9.view(t_6, (- 1), 32, 128) t_9 = t_9.transpose(1, 2) t_8 = t_8.transpose(3, 2) t_8 = torch.matmul(t_7, t_8) t_8 += x2 t_7 = t_8.float() t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None) t_8 = t_7.type_as(t_8) t_8 = torch.nn.functional.dropout(t_8, p=0.1, training=self.training, inplace=False) t_9 = torch.matmul(t_8, t_9) t_9 = t_9.transpose(1, 2) t_9 = t_9.contiguous() t_6 = t_9.view(t_6, (- 1), 4096) t_6 = self.l_4(t_6) t_9 = self.l_5(t_6) t_9 = (x1 + t_9) t_6 = (t_6, None, x2) t_8 = t_6[0] t_9 = (t_9,) t_6 = t_6[slice(1, None, None)] t_6 = (t_9 + t_6) t_9 = t_6[slice(None, 2, None)] t_7 = t_9[0] t_10 = self.l_6(t_7) t_9 = t_9[1] t_6 = t_6[slice(2, None, None)] t_11 = self.l_7(t_10) t_10 = t_10.shape t_10 = t_10[slice(None, 2, None)] t_10 = t_10[0] t_11 = t_11.view(t_10, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_0 = t_0.view(t_10, (- 1), 32, 128) t_0 = t_0.transpose(1, 2) t_1 = t_1.view(t_10, (- 1), 32, 128) t_1 = t_1.transpose(1, 2) t_0 = t_0.transpose(3, 2) t_0 = torch.matmul(t_11, t_0) t_0 += x3 t_11 = t_0.float() t_11 = torch.nn.functional.softmax(t_11, dim=(- 1), _stacklevel=3, dtype=None) t_0 = t_11.type_as(t_0) t_0 = torch.nn.functional.dropout(t_0, p=0.1, training=self.training, inplace=False) t_1 = torch.matmul(t_0, t_1) t_1 = t_1.transpose(1, 2) t_1 = t_1.contiguous() t_10 = t_1.view(t_10, (- 1), 4096) t_10 = self.l_10(t_10) t_1 = self.l_11(t_10) t_1 = (t_7 + t_1) t_10 = (t_10, None, x3) t_7 = t_10[0] t_1 = (t_1,) t_10 = t_10[slice(1, None, None)] t_10 = (t_1 + t_10) t_1 = t_10[0] t_0 = self.l_12(t_1) t_10 = t_10[slice(2, None, None)] t_10 = (t_6 + t_10) t_0 = self.l_13(t_0) t_0 = torch.nn.functional.relu(t_0, inplace=False) t_0 = self.l_14(t_0) t_0 = self.l_15(t_0) t_0 = self.l_16(t_0) t_0 = (t_1 + t_0) t_9 = (t_0, t_9) t_10 = (t_9 + t_10) t_9 = t_10[slice(None, 2, None)] t_9 = t_9[0] t_0 = self.l_17(t_9) t_1 = t_10[2] t_10 = t_10[3] t_6 = self.l_18(t_0) t_11 = self.l_19(t_0) t_12 = self.l_20(t_0) t_0 = t_0.shape t_0 = t_0[slice(None, 2, None)] t_0 = t_0[0] t_6 = t_6.view(t_0, (- 1), 32, 128) t_6 = t_6.transpose(1, 2) t_11 = t_11.view(t_0, (- 1), 32, 128) t_11 = t_11.transpose(1, 2) t_12 = t_12.view(t_0, (- 1), 32, 128) t_12 = t_12.transpose(1, 2) t_11 = t_11.transpose(3, 2) t_11 = torch.matmul(t_6, t_11) t_11 += t_1 t_6 = t_11.float() t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None) t_11 = t_6.type_as(t_11) t_11 = torch.nn.functional.dropout(t_11, p=0.1, training=self.training, inplace=False) t_12 
        # attention output: weighted sum of values, back to (batch, seq, hidden)
        t_12 = torch.matmul(t_11, t_12)
        t_12 = t_12.transpose(1, 2)
        t_12 = t_12.contiguous()
        t_0 = t_12.view(t_0, -1, 4096)
        t_0 = self.l_21(t_0)
        t_12 = self.l_22(t_0)
        t_12 = t_9 + t_12  # residual connection
        t_1 = (t_0, None, t_1)
        t_0 = t_1[0]
        t_12 = (t_12,)
        t_1 = t_1[1:]
        t_1 = t_12 + t_1
        t_12 = t_1[:2]
        t_9 = t_12[0]
        t_11 = self.l_23(t_9)
        t_12 = t_12[1]
        t_1 = t_1[2:]
        # attention block: query projected here; keys/values (t_2, t_3) were
        # computed upstream; reshape to (batch, heads, seq, head_dim)
        t_6 = self.l_24(t_11)
        t_11 = t_11.shape
        t_11 = t_11[:2]
        t_11 = t_11[0]
        t_6 = t_6.view(t_11, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_11, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_11, -1, 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)  # attention scores
        t_2 += t_10                   # additive position bias / mask
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=-1)
        t_2 = t_6.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_11 = t_3.view(t_11, -1, 4096)
        t_11 = self.l_27(t_11)
        t_3 = self.l_28(t_11)
        t_3 = t_9 + t_3  # residual connection
        t_10 = (t_11, None, t_10)
        t_11 = t_10[0]
        t_3 = (t_3,)
        t_10 = t_10[1:]
        t_10 = t_3 + t_10
        t_3 = t_10[0]
        t_9 = self.l_29(t_3)
        t_10 = t_10[2:]
        t_10 = t_1 + t_10
        # feed-forward block with ReLU
        t_9 = self.l_30(t_9)
        t_9 = torch.nn.functional.relu(t_9, inplace=False)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = self.l_33(t_9)
        t_9 = t_3 + t_9  # residual connection
        t_12 = (t_9, t_12)
        t_10 = t_12 + t_10
        t_12 = t_10[:2]
        t_12 = t_12[0]
        t_9 = self.l_34(t_12)
        t_3 = t_10[2]
        t_10 = t_10[3]
        # self-attention block: separate query/key/value projections
        t_1 = self.l_35(t_9)
        t_2 = self.l_36(t_9)
        t_6 = self.l_37(t_9)
        t_9 = t_9.shape
        t_9 = t_9[:2]
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_9, -1, 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_6 = t_6.view(t_9, -1, 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)  # attention scores
        t_2 += t_3                    # additive position bias / mask
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_2, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_9 = t_6.view(t_9, -1, 4096)
        t_9 = self.l_38(t_9)
        t_6 = self.l_39(t_9)
        t_6 = t_12 + t_6  # residual connection
        t_3 = (t_9, None, t_3)
        t_9 = t_3[0]
        t_6 = (t_6,)
        t_3 = t_3[1:]
        t_3 = t_6 + t_3
        t_6 = t_3[:2]
        t_12 = t_6[0]
        t_2 = self.l_40(t_12)
        t_6 = t_6[1]
        t_3 = t_3[2:]
        # attention block: query projected here; keys/values (t_4, t_5) were
        # computed upstream
        t_1 = self.l_41(t_2)
        t_2 = t_2.shape
        t_2 = t_2[:2]
        t_2 = t_2[0]
        t_1 = t_1.view(t_2, -1, 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_2, -1, 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_2, -1, 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)  # attention scores
        t_4 += t_10                   # additive position bias / mask
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=-1)
        t_4 = t_1.type_as(t_4)
        t_4 = torch.nn.functional.dropout(t_4, p=0.1, training=self.training, inplace=False)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_2 = t_5.view(t_2, -1, 4096)
        t_2 = self.l_44(t_2)
        t_5 = self.l_45(t_2)
        t_5 = t_12 + t_5  # residual connection
        t_10 = (t_2, None, t_10)
        t_2 = t_10[0]
        t_5 = (t_5,)
        t_10 = t_10[1:]
        t_10 = t_5 + t_10
        t_5 = t_10[0]
        t_12 = self.l_46(t_5)
        t_10 = t_10[2:]
        t_10 = t_3 + t_10
        # feed-forward block with ReLU
        t_12 = self.l_47(t_12)
        t_12 = torch.nn.functional.relu(t_12, inplace=False)
        t_12 = self.l_48(t_12)
        t_12 = self.l_49(t_12)
        t_12 = self.l_50(t_12)
        t_12 = t_5 + t_12  # residual connection
        t_6 = (t_12, t_6)
        t_10 = t_6 + t_10
        t_6 = t_10[:2]
        t_6 = t_6[0]
        t_6 = self.l_51(t_6)
        t_12 = t_10[2]  # remaining pipeline state (unused by this partition's output)
        t_10 = t_10[3]
        # scale by 0.03125 (= 1/32), project to vocabulary logits, compute loss
        t_6 = self.l_52(t_6)
        t_6 = t_6 * 0.03125
        t_6 = self.l_53(t_6)
        t_5 = t_6.size(-1)
        t_5 = t_6.view(-1, t_5)
        t_6 = labels.view(-1)
        t_6 = torch.nn.functional.cross_entropy(t_5, t_6, ignore_index=-100, reduction='mean')
        return (t_6,)

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
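# A hedged, readable sketch of the attention pattern that the traced statements in
# forward() above repeat per block (illustrative names, not part of the generated
# graph; shapes assume 32 heads of size 128, matching the constants used above).
def _attention_block_sketch(q, k, v, position_bias, dropout_p=0.1, training=False):
    batch = q.shape[0]
    q = q.view(batch, -1, 32, 128).transpose(1, 2)  # (batch, heads, seq, head_dim)
    k = k.view(batch, -1, 32, 128).transpose(1, 2)
    v = v.view(batch, -1, 32, 128).transpose(1, 2)
    scores = torch.matmul(q, k.transpose(3, 2))     # (batch, heads, q_seq, k_seq)
    scores += position_bias                         # additive bias / mask term
    weights = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
    weights = torch.nn.functional.dropout(weights, p=dropout_p, training=training)
    context = torch.matmul(weights, v)              # weighted sum of values
    return context.transpose(1, 2).contiguous().view(batch, -1, 4096)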
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str] = None,
                   basic_blocks: Tuple[Type[nn.Module], ...] = (),
                   full: bool = False) -> Iterator[Tuple[nn.Module, str, nn.Module, Optional[bool]]]:
    """
    Iterate over model layers, yielding (layer, layer_scope, encasing_module).

    Parameters:
    -----------
    module:
        the model to iterate over
    depth:
        how far down the module tree to go
    basic_blocks:
        module types that, if encountered, will not be broken down further
    full:
        whether to yield only layers selected by the depth and basic_blocks
        options, or to yield all layers (each tagged with a terminal flag)
    """
    if prefix is None:
        prefix = type(module).__name__
    for name, sub_module in module.named_children():
        scope = prefix + '/' + type(sub_module).__name__ + f'[{name}]'
        if (len(list(sub_module.children())) == 0
                or isinstance(sub_module, tuple(basic_blocks))
                or depth == 0):
            if full:
                yield sub_module, scope, module, True
            else:
                yield sub_module, scope, module
        else:
            if full:
                yield sub_module, scope, module, False
            yield from traverse_model(sub_module, depth - 1, scope, basic_blocks, full)
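# Hedged usage sketch for traverse_model (the toy model below is an assumption;
# any nn.Module works). With depth=0, the immediate children are yielded whole;
# larger depths descend further unless a basic_blocks type is encountered.
def _traverse_model_example():
    model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
    for layer, scope, parent in traverse_model(model, depth=0):
        print(scope)
    # prints:
    #   Sequential/Linear[0]
    #   Sequential/Sequential[1]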
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[str, nn.Module]:
    return {s: l for l, s, _ in traverse_model(model, depth, basic_blocks=basic_blocks)}
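# layerDict flattens the same traversal into a {scope: module} mapping, e.g.
# (continuing the toy model from the sketch above):
#
#     layerDict(model, depth=0)
#     # {'Sequential/Linear[0]': Linear(...), 'Sequential/Sequential[1]': Sequential(...)}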
def traverse_params_buffs(module: nn.Module, prefix: Optional[str] = None) -> Iterator[Tuple[Tensor, str]]:
    """
    Iterate over a model's buffers and parameters, yielding (obj, obj_scope).

    Parameters:
    -----------
    module:
        the model to iterate over
    """
    if prefix is None:
        prefix = type(module).__name__
    # this module's own parameters and buffers
    for param_name, param in module.named_parameters(recurse=False):
        param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
        yield param, param_scope
    for buffer_name, buffer in module.named_buffers(recurse=False):
        buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
        yield buffer, buffer_scope
    # recurse into children
    for name, sub_module in module.named_children():
        yield from traverse_params_buffs(sub_module, prefix + '/' + type(sub_module).__name__ + f'[{name}]')
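# Hedged usage sketch: scopes pair each tensor with the path of its owning module
# (the BatchNorm1d module here is an assumption, chosen because it owns both
# parameters and buffers).
def _traverse_params_buffs_example():
    bn = nn.BatchNorm1d(3)
    for tensor, scope in traverse_params_buffs(bn):
        print(scope)
    # prints:
    #   BatchNorm1d/Parameter[weight]
    #   BatchNorm1d/Parameter[bias]
    #   BatchNorm1d/Tensor[running_mean]
    #   BatchNorm1d/Tensor[running_var]
    #   BatchNorm1d/Tensor[num_batches_tracked]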
def tensorDict(model: nn.Module) -> OrderedDict[str, Tensor]:
    return collections.OrderedDict((s, t) for t, s in traverse_params_buffs(model))
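# tensorDict is the dict form of that traversal, keyed by scope, e.g.
# (continuing the BatchNorm1d sketch above):
#
#     tensors = tensorDict(bn)
#     tensors['BatchNorm1d/Parameter[weight]']  # -> the weight Parameter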