# Auto-generated pipeline-parallel partitioning of BertForQuestionAnswering
# (bert-large-uncased-whole-word-masking, sequence length 384) into 8 stages,
# one stage per device cuda:0 .. cuda:7.
import math
from itertools import chain

import torch
import torch.nn as nn
from torch.nn import Dropout, Embedding, LayerNorm, Linear, Softmax, Tanh
def nested_map(func, ts, full=False):
    # Apply `func` to every leaf of a nested list/tuple/set/dict structure, preserving the
    # container layout. torch.Size values are treated as leaves. With full=True, the fields
    # of slice objects are mapped as well.
    if isinstance(ts, torch.Size):
        return func(ts)
    elif isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, t, full=full) for t in ts)
    elif isinstance(ts, dict):
        return {k: nested_map(func, v, full=full) for (k, v) in ts.items()}
    elif isinstance(ts, slice) and full:
        start = nested_map(func, ts.start, full=full)
        stop = nested_map(func, ts.stop, full=full)
        step = nested_map(func, ts.step, full=full)
        return slice(start, stop, step)
    return func(ts)
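def _nested_map_demo():
    # Hypothetical usage sketch, not part of the generated partition code: move every tensor
    # in a nested batch to CPU while leaving the container layout untouched. Non-tensor leaves
    # (like the int below) are passed through unchanged.
    batch = {'ids': torch.zeros(2, 3), 'extra': [torch.ones(1), 5]}
    return nested_map(lambda t: t.cpu() if torch.is_tensor(t) else t, batch)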
def flatten(ts):
    # Yield the leaves of a nested structure in a deterministic order: torch.Size is a leaf,
    # lists/tuples/sets are traversed in iteration order, and dicts are traversed sorted by
    # key so that flatten() and unflatten() agree on element order.
    if isinstance(ts, torch.Size):
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        yield from chain(*[flatten(t) for t in ts])
    elif isinstance(ts, dict):
        yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=lambda t: t[0])])
    else:
        yield ts
def unflatten(xs, structure):
    # Rebuild a nested container shaped like `structure` from the flat sequence `xs`.
    return _unflatten(xs, structure)[0]
def _unflatten(xs, structure):
    # Returns (reconstructed element, number of flat items consumed). torch.Size and any
    # non-container value are leaves that consume exactly one item.
    if isinstance(structure, torch.Size):
        return (xs[0], 1)
    if not isinstance(structure, (list, tuple, set, dict)):
        return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
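def _flatten_unflatten_demo():
    # Hypothetical round-trip check, not used by the partitions: flatten() yields the leaves
    # in a deterministic order (dict keys sorted), and unflatten() rebuilds the original
    # layout from that flat sequence. The partitions pass a placeholder structure such as
    # [1, 1, 1] as self.input_structure; only the container shape matters, not the leaf values.
    structure = {'mask': torch.ones(2, 4), 'hidden': [torch.zeros(2, 8), torch.zeros(2, 8)]}
    flat = list(flatten(structure))
    rebuilt = unflatten(flat, structure)
    assert rebuilt['hidden'][1].shape == structure['hidden'][1].shape
    return rebuilt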
def state_dict(partition, *args, **kwargs):
    # Translate the generated submodule names (l_0, l_1, ...) back to the original model's
    # parameter names via partition.lookup, so checkpoints stay compatible with the
    # unpartitioned model.
    state = nn.Module.state_dict(partition, *args, **kwargs)
    lookup = partition.lookup
    result = dict()
for (k, v) in state.items():
if (k in lookup):
result[lookup[k]] = v
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
result[new_k] = v
return result
def load_state_dict(partition, state_dict, strict=True):
    # Inverse of state_dict(): map original model names back to the generated names and move
    # each tensor to the partition's device before delegating to nn.Module.load_state_dict.
    reverse_lookup = {v: k for (k, v) in partition.lookup.items()}
    device = partition.device
    keys = list(partition.state_dict(None).keys())
    new_state = dict()
for k in keys:
if (k in reverse_lookup):
new_state[reverse_lookup[k]] = state_dict[k].to(device)
continue
idx = k.rfind('.')
to_replace = k[:idx]
if (to_replace in reverse_lookup):
key = (reverse_lookup[to_replace] + k[idx:])
new_state[key] = state_dict[k].to(device)
nn.Module.load_state_dict(partition, new_state, strict=strict)
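# Concrete illustration of the name translation above: Partition0.lookup maps 'l_0' to
# 'bert.embeddings.word_embeddings', so state_dict() emits the raw key 'l_0.weight' as
# 'bert.embeddings.word_embeddings.weight', and load_state_dict() applies the reverse mapping
# (moving each tensor to partition.device) before delegating to nn.Module.load_state_dict.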
def named_buffers(partition, prefix='', recurse=True):
params = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
def named_parameters(partition, prefix='', recurse=True):
params = nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
def cpu(partition):
partition.device = torch.device('cpu')
return nn.Module.cpu(partition)
def cuda(partition, device=None):
if (device is None):
device = torch.cuda.current_device()
partition.device = torch.device(device)
return nn.Module.cuda(partition, partition.device)
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args[0]):
device = args[0].device
if (not (device is None)):
partition.device = torch.device(device)
return nn.Module.to(partition, *args, **kwargs)
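# Illustration: each of the calls below updates partition.device before delegating to
# nn.Module.to, while a dtype-only call such as partition.to(torch.float16) leaves the
# tracked device unchanged.
#   partition.to('cuda:1')
#   partition.to(device=torch.device('cuda:1'))
#   partition.to(tensor=torch.empty(1, device='cuda:1'))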
def bert_large_uncased_whole_word_maskings_384_8p_bw12_async_pipedream():
    return dict(
        model_type='bert_squad',
        model_name_or_path='bert-large-uncased-whole-word-masking',
        do_lower_case=True,
        output_past=False,
        stateless_tied=False,
        explicitly_set_dict={'precompute_attention_mask': True, 'return_dict': False},
        do_resize_token_embedding=False,
    )
def create_pipeline_configuration(DEBUG=False, batch_size=24):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Softmax, Embedding, Linear, Tanh, LayerNorm, Dropout), 'model_inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3, 4, 5, 6, 7]}, 'input_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'token_type_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}}, 'model_outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([24, 384, 2]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'token_type_ids': {'shape': torch.Size([24, 384]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 
'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': 
[6]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'attention_mask': {'shape': torch.Size([24, 1, 1, 384]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]': {'shape': torch.Size([24, 384, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'BertForQuestionAnswering/Linear[qa_outputs]': {'shape': torch.Size([24, 384, 2]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}}
batch_dim = config['batch_dim']
for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
for s in config['stages'].values():
for d in chain(s['inputs'].values(), s['outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
return config
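def _config_demo():
    # Hypothetical usage, not called by the generated code: the tensor shapes recorded in the
    # config above assume batch_size=24; the two loops at the end of create_pipeline_configuration
    # rewrite the batch dimension (batch_dim=0) of every batched tensor to the requested size.
    cfg = create_pipeline_configuration(DEBUG=True, batch_size=8)
    assert cfg['model_inputs']['input_ids']['shape'] == torch.Size([8, 384])
    assert cfg['model_outputs']['BertForQuestionAnswering/Linear[qa_outputs]']['shape'] == torch.Size([8, 384, 2])
    return cfg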
class Partition0(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[word_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[position_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Embedding[token_type_embeddings]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEmbeddings[embeddings]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[0]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[1]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[2]/BertOutput[output]/LayerNorm[LayerNorm]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:0'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.embeddings.word_embeddings', 'l_1': 'bert.embeddings.position_embeddings', 'l_2': 'bert.embeddings.token_type_embeddings', 'l_3': 'bert.embeddings.LayerNorm', 'l_4': 'bert.embeddings.dropout', 'l_5': 'bert.encoder.0.attention.self.query', 'l_6': 'bert.encoder.0.attention.self.key', 'l_7': 'bert.encoder.0.attention.self.value', 'l_8': 'bert.encoder.0.attention.self.softmax', 'l_9': 'bert.encoder.0.attention.self.dropout', 'l_10': 'bert.encoder.0.attention.output.dense', 'l_11': 'bert.encoder.0.attention.output.dropout', 'l_12': 'bert.encoder.0.attention.output.LayerNorm', 'l_13': 'bert.encoder.0.intermediate.dense', 'l_14': 'bert.encoder.0.output.dense', 'l_15': 'bert.encoder.0.output.dropout', 'l_16': 'bert.encoder.0.output.LayerNorm', 'l_17': 'bert.encoder.1.attention.self.query', 'l_18': 'bert.encoder.1.attention.self.key', 'l_19': 'bert.encoder.1.attention.self.value', 'l_20': 'bert.encoder.1.attention.self.softmax', 'l_21': 'bert.encoder.1.attention.self.dropout', 'l_22': 'bert.encoder.1.attention.output.dense', 'l_23': 'bert.encoder.1.attention.output.dropout', 'l_24': 'bert.encoder.1.attention.output.LayerNorm', 'l_25': 'bert.encoder.1.intermediate.dense', 'l_26': 'bert.encoder.1.output.dense', 'l_27': 'bert.encoder.1.output.dropout', 'l_28': 'bert.encoder.1.output.LayerNorm', 'l_29': 'bert.encoder.2.attention.self.query', 'l_30': 'bert.encoder.2.attention.self.key', 'l_31': 'bert.encoder.2.attention.self.value', 'l_32': 'bert.encoder.2.attention.self.softmax', 'l_33': 'bert.encoder.2.attention.self.dropout', 'l_34': 'bert.encoder.2.attention.output.dense', 'l_35': 'bert.encoder.2.attention.output.dropout', 'l_36': 'bert.encoder.2.attention.output.LayerNorm', 'l_37': 'bert.encoder.2.intermediate.dense', 'l_38': 'bert.encoder.2.output.dense', 'l_39': 'bert.encoder.2.output.dropout', 'l_40': 'bert.encoder.2.output.LayerNorm'}
self.to(self.device)
def forward(self, *args):
(attention_mask, input_ids, token_type_ids) = unflatten(args, self.input_structure)
t_0 = self.l_0(input_ids)
t_1 = self.l_2(token_type_ids)
t_2 = input_ids.size(1)
t_2 = torch.arange(t_2, dtype=torch.int64, device=self.device)
t_2 = t_2.unsqueeze(0)
t_2 = t_2.expand_as(input_ids)
t_2 = self.l_1(t_2)
t_2 = (t_0 + t_2)
t_1 = (t_2 + t_1)
t_1 = self.l_3(t_1)
t_1 = self.l_4(t_1)
t_2 = self.l_5(t_1)
t_0 = self.l_6(t_1)
t_3 = self.l_7(t_1)
t_4 = t_2.size()
t_5 = t_0.size()
t_6 = t_3.size()
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_7 = t_4[0]
t_8 = t_4[1]
t_9 = t_4[2]
t_4 = t_4[3]
t_4 = t_2.view(t_7, t_8, t_9, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_7 = t_5[2]
t_5 = t_5[3]
t_5 = t_0.view(t_9, t_8, t_7, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_7 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_3.view(t_7, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_4, t_5)
t_4 = math.sqrt(64)
t_4 = (t_5 / t_4)
t_4 = (t_4 + attention_mask)
t_4 = self.l_8(t_4)
t_4 = self.l_9(t_4)
t_6 = torch.matmul(t_4, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_6 = t_6.contiguous()
t_4 = t_6.size()
t_4 = t_4[slice(None, (- 2), None)]
t_4 = (t_4 + (1024,))
t_5 = t_4[0]
t_9 = t_4[1]
t_4 = t_4[2]
t_4 = t_6.view(t_5, t_9, t_4)
t_4 = self.l_10(t_4)
t_4 = self.l_11(t_4)
t_1 = (t_4 + t_1)
t_1 = self.l_12(t_1)
t_4 = self.l_13(t_1)
t_4 = torch.nn.functional.gelu(t_4)
t_4 = self.l_14(t_4)
t_4 = self.l_15(t_4)
t_1 = (t_4 + t_1)
t_1 = self.l_16(t_1)
t_4 = self.l_17(t_1)
t_9 = self.l_18(t_1)
t_5 = self.l_19(t_1)
t_6 = t_4.size()
t_8 = t_9.size()
t_7 = t_5.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_3 = t_6[0]
t_0 = t_6[1]
t_2 = t_6[2]
t_6 = t_6[3]
t_6 = t_4.view(t_3, t_0, t_2, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_2 = t_8[0]
t_0 = t_8[1]
t_3 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_2, t_0, t_3, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_7 = t_7[slice(None, (- 1), None)]
t_7 = (t_7 + (16, 64))
t_3 = t_7[0]
t_0 = t_7[1]
t_2 = t_7[2]
t_7 = t_7[3]
t_7 = t_5.view(t_3, t_0, t_2, t_7)
t_7 = t_7.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_6, t_8)
t_6 = math.sqrt(64)
t_6 = (t_8 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_7 = torch.matmul(t_6, t_7)
t_7 = t_7.permute(0, 2, 1, 3)
t_7 = t_7.contiguous()
t_6 = t_7.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_8 = t_6[0]
t_2 = t_6[1]
t_6 = t_6[2]
t_6 = t_7.view(t_8, t_2, t_6)
t_6 = self.l_22(t_6)
t_6 = self.l_23(t_6)
t_1 = (t_6 + t_1)
t_1 = self.l_24(t_1)
t_6 = self.l_25(t_1)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_26(t_6)
t_6 = self.l_27(t_6)
t_1 = (t_6 + t_1)
t_1 = self.l_28(t_1)
t_6 = self.l_29(t_1)
t_2 = self.l_30(t_1)
t_8 = self.l_31(t_1)
t_7 = t_6.size()
t_0 = t_2.size()
t_3 = t_8.size()
t_7 = t_7[slice(None, (- 1), None)]
t_7 = (t_7 + (16, 64))
t_5 = t_7[0]
t_9 = t_7[1]
t_4 = t_7[2]
t_7 = t_7[3]
t_7 = t_6.view(t_5, t_9, t_4, t_7)
t_7 = t_7.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_9 = t_0[1]
t_5 = t_0[2]
t_0 = t_0[3]
t_0 = t_2.view(t_4, t_9, t_5, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_5 = t_3[0]
t_9 = t_3[1]
t_4 = t_3[2]
t_3 = t_3[3]
t_3 = t_8.view(t_5, t_9, t_4, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_0 = t_0.transpose((- 1), (- 2))
t_0 = torch.matmul(t_7, t_0)
t_7 = math.sqrt(64)
t_7 = (t_0 / t_7)
t_7 = (t_7 + attention_mask)
t_7 = self.l_32(t_7)
t_7 = self.l_33(t_7)
t_3 = torch.matmul(t_7, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_3 = t_3.contiguous()
t_7 = t_3.size()
t_7 = t_7[slice(None, (- 2), None)]
t_7 = (t_7 + (1024,))
t_0 = t_7[0]
t_4 = t_7[1]
t_7 = t_7[2]
t_7 = t_3.view(t_0, t_4, t_7)
t_7 = self.l_34(t_7)
t_7 = self.l_35(t_7)
t_1 = (t_7 + t_1)
t_1 = self.l_36(t_1)
t_7 = self.l_37(t_1)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_38(t_7)
t_7 = self.l_39(t_7)
t_1 = (t_7 + t_1)
t_1 = self.l_40(t_1)
return (t_1,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
class Partition1(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[3]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[4]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[5]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1]
self.lookup = {'l_0': 'bert.encoder.3.attention.self.query', 'l_1': 'bert.encoder.3.attention.self.key', 'l_2': 'bert.encoder.3.attention.self.value', 'l_3': 'bert.encoder.3.attention.self.softmax', 'l_4': 'bert.encoder.3.attention.self.dropout', 'l_5': 'bert.encoder.3.attention.output.dense', 'l_6': 'bert.encoder.3.attention.output.dropout', 'l_7': 'bert.encoder.3.attention.output.LayerNorm', 'l_8': 'bert.encoder.3.intermediate.dense', 'l_9': 'bert.encoder.3.output.dense', 'l_10': 'bert.encoder.3.output.dropout', 'l_11': 'bert.encoder.3.output.LayerNorm', 'l_12': 'bert.encoder.4.attention.self.query', 'l_13': 'bert.encoder.4.attention.self.key', 'l_14': 'bert.encoder.4.attention.self.value', 'l_15': 'bert.encoder.4.attention.self.softmax', 'l_16': 'bert.encoder.4.attention.self.dropout', 'l_17': 'bert.encoder.4.attention.output.dense', 'l_18': 'bert.encoder.4.attention.output.dropout', 'l_19': 'bert.encoder.4.attention.output.LayerNorm', 'l_20': 'bert.encoder.4.intermediate.dense', 'l_21': 'bert.encoder.4.output.dense', 'l_22': 'bert.encoder.4.output.dropout', 'l_23': 'bert.encoder.4.output.LayerNorm', 'l_24': 'bert.encoder.5.attention.self.query', 'l_25': 'bert.encoder.5.attention.self.key', 'l_26': 'bert.encoder.5.attention.self.value', 'l_27': 'bert.encoder.5.attention.self.softmax', 'l_28': 'bert.encoder.5.attention.self.dropout', 'l_29': 'bert.encoder.5.attention.output.dense', 'l_30': 'bert.encoder.5.attention.output.dropout', 'l_31': 'bert.encoder.5.attention.output.LayerNorm', 'l_32': 'bert.encoder.5.intermediate.dense', 'l_33': 'bert.encoder.5.output.dense', 'l_34': 'bert.encoder.5.output.dropout', 'l_35': 'bert.encoder.5.output.LayerNorm', 'l_36': 'bert.encoder.6.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = self.l_2(x0)
t_3 = t_0.size()
t_4 = t_1.size()
t_5 = t_2.size()
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_6 = t_3[0]
t_7 = t_3[1]
t_8 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_6, t_7, t_8, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_8 = t_4[0]
t_7 = t_4[1]
t_6 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_8, t_7, t_6, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_6 = t_5[0]
t_7 = t_5[1]
t_8 = t_5[2]
t_5 = t_5[3]
t_5 = t_2.view(t_6, t_7, t_8, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_4 = t_4.transpose((- 1), (- 2))
t_4 = torch.matmul(t_3, t_4)
t_3 = math.sqrt(64)
t_3 = (t_4 / t_3)
t_3 = (t_3 + attention_mask)
t_3 = self.l_3(t_3)
t_3 = self.l_4(t_3)
t_5 = torch.matmul(t_3, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_5 = t_5.contiguous()
t_3 = t_5.size()
t_3 = t_3[slice(None, (- 2), None)]
t_3 = (t_3 + (1024,))
t_4 = t_3[0]
t_8 = t_3[1]
t_3 = t_3[2]
t_3 = t_5.view(t_4, t_8, t_3)
t_3 = self.l_5(t_3)
t_3 = self.l_6(t_3)
t_3 = (t_3 + x0)
t_3 = self.l_7(t_3)
t_8 = self.l_8(t_3)
t_8 = torch.nn.functional.gelu(t_8)
t_8 = self.l_9(t_8)
t_8 = self.l_10(t_8)
t_3 = (t_8 + t_3)
t_3 = self.l_11(t_3)
t_8 = self.l_12(t_3)
t_4 = self.l_13(t_3)
t_5 = self.l_14(t_3)
t_7 = t_8.size()
t_6 = t_4.size()
t_2 = t_5.size()
t_7 = t_7[slice(None, (- 1), None)]
t_7 = (t_7 + (16, 64))
t_1 = t_7[0]
t_0 = t_7[1]
t_9 = t_7[2]
t_7 = t_7[3]
t_7 = t_8.view(t_1, t_0, t_9, t_7)
t_7 = t_7.permute(0, 2, 1, 3)
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_9 = t_6[0]
t_0 = t_6[1]
t_1 = t_6[2]
t_6 = t_6[3]
t_6 = t_4.view(t_9, t_0, t_1, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_1 = t_2[0]
t_0 = t_2[1]
t_9 = t_2[2]
t_2 = t_2[3]
t_2 = t_5.view(t_1, t_0, t_9, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_6 = t_6.transpose((- 1), (- 2))
t_6 = torch.matmul(t_7, t_6)
t_7 = math.sqrt(64)
t_7 = (t_6 / t_7)
t_7 = (t_7 + attention_mask)
t_7 = self.l_15(t_7)
t_7 = self.l_16(t_7)
t_2 = torch.matmul(t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_2 = t_2.contiguous()
t_7 = t_2.size()
t_7 = t_7[slice(None, (- 2), None)]
t_7 = (t_7 + (1024,))
t_6 = t_7[0]
t_9 = t_7[1]
t_7 = t_7[2]
t_7 = t_2.view(t_6, t_9, t_7)
t_7 = self.l_17(t_7)
t_7 = self.l_18(t_7)
t_3 = (t_7 + t_3)
t_3 = self.l_19(t_3)
t_7 = self.l_20(t_3)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_21(t_7)
t_7 = self.l_22(t_7)
t_3 = (t_7 + t_3)
t_3 = self.l_23(t_3)
t_7 = self.l_24(t_3)
t_9 = self.l_25(t_3)
t_6 = self.l_26(t_3)
t_2 = t_7.size()
t_0 = t_9.size()
t_1 = t_6.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_4 = t_2[1]
t_8 = t_2[2]
t_2 = t_2[3]
t_2 = t_7.view(t_5, t_4, t_8, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_8 = t_0[0]
t_4 = t_0[1]
t_5 = t_0[2]
t_0 = t_0[3]
t_0 = t_9.view(t_8, t_4, t_5, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_5 = t_1[0]
t_4 = t_1[1]
t_8 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_5, t_4, t_8, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_0 = t_0.transpose((- 1), (- 2))
t_0 = torch.matmul(t_2, t_0)
t_2 = math.sqrt(64)
t_2 = (t_0 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_27(t_2)
t_2 = self.l_28(t_2)
t_1 = torch.matmul(t_2, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_2 = t_1.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_0 = t_2[0]
t_8 = t_2[1]
t_2 = t_2[2]
t_2 = t_1.view(t_0, t_8, t_2)
t_2 = self.l_29(t_2)
t_2 = self.l_30(t_2)
t_3 = (t_2 + t_3)
t_3 = self.l_31(t_3)
t_2 = self.l_32(t_3)
t_2 = torch.nn.functional.gelu(t_2)
t_2 = self.l_33(t_2)
t_2 = self.l_34(t_2)
t_3 = (t_2 + t_3)
t_3 = self.l_35(t_3)
t_2 = self.l_36(t_3)
return list(flatten((t_3, t_2)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
class Partition2(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.6.attention.self.key', 'l_1': 'bert.encoder.6.attention.self.value', 'l_2': 'bert.encoder.6.attention.self.softmax', 'l_3': 'bert.encoder.6.attention.self.dropout', 'l_4': 'bert.encoder.6.attention.output.dense', 'l_5': 'bert.encoder.6.attention.output.dropout', 'l_6': 'bert.encoder.6.attention.output.LayerNorm', 'l_7': 'bert.encoder.6.intermediate.dense', 'l_8': 'bert.encoder.6.output.dense', 'l_9': 'bert.encoder.6.output.dropout', 'l_10': 'bert.encoder.6.output.LayerNorm', 'l_11': 'bert.encoder.7.attention.self.query', 'l_12': 'bert.encoder.7.attention.self.key', 'l_13': 'bert.encoder.7.attention.self.value', 'l_14': 'bert.encoder.7.attention.self.softmax', 'l_15': 'bert.encoder.7.attention.self.dropout', 'l_16': 'bert.encoder.7.attention.output.dense', 'l_17': 'bert.encoder.7.attention.output.dropout', 'l_18': 'bert.encoder.7.attention.output.LayerNorm', 'l_19': 'bert.encoder.7.intermediate.dense', 'l_20': 'bert.encoder.7.output.dense', 'l_21': 'bert.encoder.7.output.dropout', 'l_22': 'bert.encoder.7.output.LayerNorm', 'l_23': 'bert.encoder.8.attention.self.query', 'l_24': 'bert.encoder.8.attention.self.key', 'l_25': 'bert.encoder.8.attention.self.value', 'l_26': 'bert.encoder.8.attention.self.softmax', 'l_27': 'bert.encoder.8.attention.self.dropout', 'l_28': 'bert.encoder.8.attention.output.dense', 'l_29': 'bert.encoder.8.attention.output.dropout', 'l_30': 'bert.encoder.8.attention.output.LayerNorm', 'l_31': 'bert.encoder.8.intermediate.dense', 'l_32': 'bert.encoder.8.output.dense', 'l_33': 'bert.encoder.8.output.dropout', 'l_34': 'bert.encoder.8.output.LayerNorm', 'l_35': 'bert.encoder.9.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_35(t_2)
return list(flatten((t_2, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
class Partition3(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.9.attention.self.key', 'l_1': 'bert.encoder.9.attention.self.value', 'l_2': 'bert.encoder.9.attention.self.softmax', 'l_3': 'bert.encoder.9.attention.self.dropout', 'l_4': 'bert.encoder.9.attention.output.dense', 'l_5': 'bert.encoder.9.attention.output.dropout', 'l_6': 'bert.encoder.9.attention.output.LayerNorm', 'l_7': 'bert.encoder.9.intermediate.dense', 'l_8': 'bert.encoder.9.output.dense', 'l_9': 'bert.encoder.9.output.dropout', 'l_10': 'bert.encoder.9.output.LayerNorm', 'l_11': 'bert.encoder.10.attention.self.query', 'l_12': 'bert.encoder.10.attention.self.key', 'l_13': 'bert.encoder.10.attention.self.value', 'l_14': 'bert.encoder.10.attention.self.softmax', 'l_15': 'bert.encoder.10.attention.self.dropout', 'l_16': 'bert.encoder.10.attention.output.dense', 'l_17': 'bert.encoder.10.attention.output.dropout', 'l_18': 'bert.encoder.10.attention.output.LayerNorm', 'l_19': 'bert.encoder.10.intermediate.dense', 'l_20': 'bert.encoder.10.output.dense', 'l_21': 'bert.encoder.10.output.dropout', 'l_22': 'bert.encoder.10.output.LayerNorm', 'l_23': 'bert.encoder.11.attention.self.query', 'l_24': 'bert.encoder.11.attention.self.key', 'l_25': 'bert.encoder.11.attention.self.value', 'l_26': 'bert.encoder.11.attention.self.softmax', 'l_27': 'bert.encoder.11.attention.self.dropout', 'l_28': 'bert.encoder.11.attention.output.dense', 'l_29': 'bert.encoder.11.attention.output.dropout', 'l_30': 'bert.encoder.11.attention.output.LayerNorm', 'l_31': 'bert.encoder.11.intermediate.dense', 'l_32': 'bert.encoder.11.output.dense', 'l_33': 'bert.encoder.11.output.dropout', 'l_34': 'bert.encoder.11.output.LayerNorm', 'l_35': 'bert.encoder.12.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_35(t_2)
return list(flatten((t_2, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition4(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[13]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[14]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:4'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.12.attention.self.key', 'l_1': 'bert.encoder.12.attention.self.value', 'l_2': 'bert.encoder.12.attention.self.softmax', 'l_3': 'bert.encoder.12.attention.self.dropout', 'l_4': 'bert.encoder.12.attention.output.dense', 'l_5': 'bert.encoder.12.attention.output.dropout', 'l_6': 'bert.encoder.12.attention.output.LayerNorm', 'l_7': 'bert.encoder.12.intermediate.dense', 'l_8': 'bert.encoder.12.output.dense', 'l_9': 'bert.encoder.12.output.dropout', 'l_10': 'bert.encoder.12.output.LayerNorm', 'l_11': 'bert.encoder.13.attention.self.query', 'l_12': 'bert.encoder.13.attention.self.key', 'l_13': 'bert.encoder.13.attention.self.value', 'l_14': 'bert.encoder.13.attention.self.softmax', 'l_15': 'bert.encoder.13.attention.self.dropout', 'l_16': 'bert.encoder.13.attention.output.dense', 'l_17': 'bert.encoder.13.attention.output.dropout', 'l_18': 'bert.encoder.13.attention.output.LayerNorm', 'l_19': 'bert.encoder.13.intermediate.dense', 'l_20': 'bert.encoder.13.output.dense', 'l_21': 'bert.encoder.13.output.dropout', 'l_22': 'bert.encoder.13.output.LayerNorm', 'l_23': 'bert.encoder.14.attention.self.query', 'l_24': 'bert.encoder.14.attention.self.key', 'l_25': 'bert.encoder.14.attention.self.value', 'l_26': 'bert.encoder.14.attention.self.softmax', 'l_27': 'bert.encoder.14.attention.self.dropout', 'l_28': 'bert.encoder.14.attention.output.dense', 'l_29': 'bert.encoder.14.attention.output.dropout', 'l_30': 'bert.encoder.14.attention.output.LayerNorm', 'l_31': 'bert.encoder.14.intermediate.dense', 'l_32': 'bert.encoder.14.output.dense', 'l_33': 'bert.encoder.14.output.dropout', 'l_34': 'bert.encoder.14.output.LayerNorm', 'l_35': 'bert.encoder.15.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_35(t_2)
return list(flatten((t_2, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition5(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[15]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[16]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[17]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:5'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.15.attention.self.key', 'l_1': 'bert.encoder.15.attention.self.value', 'l_2': 'bert.encoder.15.attention.self.softmax', 'l_3': 'bert.encoder.15.attention.self.dropout', 'l_4': 'bert.encoder.15.attention.output.dense', 'l_5': 'bert.encoder.15.attention.output.dropout', 'l_6': 'bert.encoder.15.attention.output.LayerNorm', 'l_7': 'bert.encoder.15.intermediate.dense', 'l_8': 'bert.encoder.15.output.dense', 'l_9': 'bert.encoder.15.output.dropout', 'l_10': 'bert.encoder.15.output.LayerNorm', 'l_11': 'bert.encoder.16.attention.self.query', 'l_12': 'bert.encoder.16.attention.self.key', 'l_13': 'bert.encoder.16.attention.self.value', 'l_14': 'bert.encoder.16.attention.self.softmax', 'l_15': 'bert.encoder.16.attention.self.dropout', 'l_16': 'bert.encoder.16.attention.output.dense', 'l_17': 'bert.encoder.16.attention.output.dropout', 'l_18': 'bert.encoder.16.attention.output.LayerNorm', 'l_19': 'bert.encoder.16.intermediate.dense', 'l_20': 'bert.encoder.16.output.dense', 'l_21': 'bert.encoder.16.output.dropout', 'l_22': 'bert.encoder.16.output.LayerNorm', 'l_23': 'bert.encoder.17.attention.self.query', 'l_24': 'bert.encoder.17.attention.self.key', 'l_25': 'bert.encoder.17.attention.self.value', 'l_26': 'bert.encoder.17.attention.self.softmax', 'l_27': 'bert.encoder.17.attention.self.dropout', 'l_28': 'bert.encoder.17.attention.output.dense', 'l_29': 'bert.encoder.17.attention.output.dropout', 'l_30': 'bert.encoder.17.attention.output.LayerNorm', 'l_31': 'bert.encoder.17.intermediate.dense', 'l_32': 'bert.encoder.17.output.dense', 'l_33': 'bert.encoder.17.output.dropout', 'l_34': 'bert.encoder.17.output.LayerNorm', 'l_35': 'bert.encoder.18.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_35(t_2)
return list(flatten((t_2, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition6(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[18]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[19]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[20]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:6'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.18.attention.self.key', 'l_1': 'bert.encoder.18.attention.self.value', 'l_2': 'bert.encoder.18.attention.self.softmax', 'l_3': 'bert.encoder.18.attention.self.dropout', 'l_4': 'bert.encoder.18.attention.output.dense', 'l_5': 'bert.encoder.18.attention.output.dropout', 'l_6': 'bert.encoder.18.attention.output.LayerNorm', 'l_7': 'bert.encoder.18.intermediate.dense', 'l_8': 'bert.encoder.18.output.dense', 'l_9': 'bert.encoder.18.output.dropout', 'l_10': 'bert.encoder.18.output.LayerNorm', 'l_11': 'bert.encoder.19.attention.self.query', 'l_12': 'bert.encoder.19.attention.self.key', 'l_13': 'bert.encoder.19.attention.self.value', 'l_14': 'bert.encoder.19.attention.self.softmax', 'l_15': 'bert.encoder.19.attention.self.dropout', 'l_16': 'bert.encoder.19.attention.output.dense', 'l_17': 'bert.encoder.19.attention.output.dropout', 'l_18': 'bert.encoder.19.attention.output.LayerNorm', 'l_19': 'bert.encoder.19.intermediate.dense', 'l_20': 'bert.encoder.19.output.dense', 'l_21': 'bert.encoder.19.output.dropout', 'l_22': 'bert.encoder.19.output.LayerNorm', 'l_23': 'bert.encoder.20.attention.self.query', 'l_24': 'bert.encoder.20.attention.self.key', 'l_25': 'bert.encoder.20.attention.self.value', 'l_26': 'bert.encoder.20.attention.self.softmax', 'l_27': 'bert.encoder.20.attention.self.dropout', 'l_28': 'bert.encoder.20.attention.output.dense', 'l_29': 'bert.encoder.20.attention.output.dropout', 'l_30': 'bert.encoder.20.attention.output.LayerNorm', 'l_31': 'bert.encoder.20.intermediate.dense', 'l_32': 'bert.encoder.20.output.dense', 'l_33': 'bert.encoder.20.output.dropout', 'l_34': 'bert.encoder.20.output.LayerNorm', 'l_35': 'bert.encoder.21.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_35(t_2)
return list(flatten((t_2, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition7(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[21]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[22]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[23]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertPooler[pooler]/Tanh[activation]', 'BertForQuestionAnswering/Linear[qa_outputs]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:7'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'bert.encoder.21.attention.self.key', 'l_1': 'bert.encoder.21.attention.self.value', 'l_2': 'bert.encoder.21.attention.self.softmax', 'l_3': 'bert.encoder.21.attention.self.dropout', 'l_4': 'bert.encoder.21.attention.output.dense', 'l_5': 'bert.encoder.21.attention.output.dropout', 'l_6': 'bert.encoder.21.attention.output.LayerNorm', 'l_7': 'bert.encoder.21.intermediate.dense', 'l_8': 'bert.encoder.21.output.dense', 'l_9': 'bert.encoder.21.output.dropout', 'l_10': 'bert.encoder.21.output.LayerNorm', 'l_11': 'bert.encoder.22.attention.self.query', 'l_12': 'bert.encoder.22.attention.self.key', 'l_13': 'bert.encoder.22.attention.self.value', 'l_14': 'bert.encoder.22.attention.self.softmax', 'l_15': 'bert.encoder.22.attention.self.dropout', 'l_16': 'bert.encoder.22.attention.output.dense', 'l_17': 'bert.encoder.22.attention.output.dropout', 'l_18': 'bert.encoder.22.attention.output.LayerNorm', 'l_19': 'bert.encoder.22.intermediate.dense', 'l_20': 'bert.encoder.22.output.dense', 'l_21': 'bert.encoder.22.output.dropout', 'l_22': 'bert.encoder.22.output.LayerNorm', 'l_23': 'bert.encoder.23.attention.self.query', 'l_24': 'bert.encoder.23.attention.self.key', 'l_25': 'bert.encoder.23.attention.self.value', 'l_26': 'bert.encoder.23.attention.self.softmax', 'l_27': 'bert.encoder.23.attention.self.dropout', 'l_28': 'bert.encoder.23.attention.output.dense', 'l_29': 'bert.encoder.23.attention.output.dropout', 'l_30': 'bert.encoder.23.attention.output.LayerNorm', 'l_31': 'bert.encoder.23.intermediate.dense', 'l_32': 'bert.encoder.23.output.dense', 'l_33': 'bert.encoder.23.output.dropout', 'l_34': 'bert.encoder.23.output.LayerNorm', 'l_35': 'bert.pooler.dense', 'l_36': 'bert.pooler.activation', 'l_37': 'qa_outputs'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = x1.size()
t_3 = t_0.size()
t_4 = t_1.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_6 = t_2[1]
t_7 = t_2[2]
t_2 = t_2[3]
t_2 = x1.view(t_5, t_6, t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_7 = t_3[0]
t_6 = t_3[1]
t_5 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_7, t_6, t_5, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_5 = t_4[0]
t_6 = t_4[1]
t_7 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_5, t_6, t_7, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_3 = t_3.transpose((- 1), (- 2))
t_3 = torch.matmul(t_2, t_3)
t_2 = math.sqrt(64)
t_2 = (t_3 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_2(t_2)
t_2 = self.l_3(t_2)
t_4 = torch.matmul(t_2, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_4 = t_4.contiguous()
t_2 = t_4.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_3 = t_2[0]
t_7 = t_2[1]
t_2 = t_2[2]
t_2 = t_4.view(t_3, t_7, t_2)
t_2 = self.l_4(t_2)
t_2 = self.l_5(t_2)
t_2 = (t_2 + x0)
t_2 = self.l_6(t_2)
t_7 = self.l_7(t_2)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_8(t_7)
t_7 = self.l_9(t_7)
t_2 = (t_7 + t_2)
t_2 = self.l_10(t_2)
t_7 = self.l_11(t_2)
t_3 = self.l_12(t_2)
t_4 = self.l_13(t_2)
t_6 = t_7.size()
t_5 = t_3.size()
t_1 = t_4.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_9 = t_6[2]
t_6 = t_6[3]
t_6 = t_7.view(t_0, t_8, t_9, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_9 = t_5[0]
t_8 = t_5[1]
t_0 = t_5[2]
t_5 = t_5[3]
t_5 = t_3.view(t_9, t_8, t_0, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_0 = t_1[0]
t_8 = t_1[1]
t_9 = t_1[2]
t_1 = t_1[3]
t_1 = t_4.view(t_0, t_8, t_9, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_5 = t_5.transpose((- 1), (- 2))
t_5 = torch.matmul(t_6, t_5)
t_6 = math.sqrt(64)
t_6 = (t_5 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_14(t_6)
t_6 = self.l_15(t_6)
t_1 = torch.matmul(t_6, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_6 = t_1.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_5 = t_6[0]
t_9 = t_6[1]
t_6 = t_6[2]
t_6 = t_1.view(t_5, t_9, t_6)
t_6 = self.l_16(t_6)
t_6 = self.l_17(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_18(t_2)
t_6 = self.l_19(t_2)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_20(t_6)
t_6 = self.l_21(t_6)
t_2 = (t_6 + t_2)
t_2 = self.l_22(t_2)
t_6 = self.l_23(t_2)
t_9 = self.l_24(t_2)
t_5 = self.l_25(t_2)
t_1 = t_6.size()
t_8 = t_9.size()
t_0 = t_5.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_4 = t_1[0]
t_3 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_4, t_3, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_7 = t_8[0]
t_3 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_9.view(t_7, t_3, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_3 = t_0[1]
t_7 = t_0[2]
t_0 = t_0[3]
t_0 = t_5.view(t_4, t_3, t_7, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_1, t_8)
t_1 = math.sqrt(64)
t_1 = (t_8 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_0 = torch.matmul(t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_1 = t_0.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_8 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_0.view(t_8, t_7, t_1)
t_1 = self.l_28(t_1)
t_1 = self.l_29(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_30(t_2)
t_1 = self.l_31(t_2)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_32(t_1)
t_1 = self.l_33(t_1)
t_2 = (t_1 + t_2)
t_2 = self.l_34(t_2)
t_1 = self.l_37(t_2)
t_2 = t_2[(slice(None, None, None), 0)]
t_2 = self.l_35(t_2)
t_2 = self.l_36(t_2)
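# Added comment: t_2 is now the pooled [CLS] representation (pooler dense + tanh). The generated
# code computes it but does not return it; only the qa_outputs logits t_1 leave this stage.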
return (t_1,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module], ...]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
'''
Iterate over model layers, yielding (layer, layer_scope, encasing_module).

Parameters:
-----------
module:
    the model to iterate over
depth:
    how far down in the model tree to go
basic_blocks:
    a list of modules that, if encountered, will not be broken down further
full:
    whether to yield only the layers selected by the depth and basic_blocks options, or to yield all layers
'''
if (prefix is None):
prefix = type(module).__name__
for (name, sub_module) in module.named_children():
scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')
if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)):
if full:
(yield (sub_module, scope, module, True))
else:
(yield (sub_module, scope, module))
else:
if full:
(yield (sub_module, scope, module, False))
(yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
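# Minimal usage sketch (added; the toy model below is illustrative and not part of the source):
def _example_traverse_model():
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU())
    for layer, scope, parent in traverse_model(model, depth=1000):
        # prints e.g. "Sequential/Linear[0] Linear" and "Sequential/ReLU[1] ReLU"
        print(scope, type(layer).__name__)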
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.Tensor, str)]]:
"""
Iterate over a model's buffers and parameters, yielding (obj, obj_scope).

Parameters:
-----------
module:
    the model to iterate over
"""
if (prefix is None):
prefix = type(module).__name__
for (param_name, param) in module.named_parameters(recurse=False):
param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
(yield (param, param_scope))
for (buffer_name, buffer) in module.named_buffers(recurse=False):
buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
(yield (buffer, buffer_scope))
for (name, sub_module) in module.named_children():
(yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
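# Sketch (assumes `model` is the BertForQuestionAnswering instance these partitions were
# generated from): layerDict/tensorDict build the scope->module and scope->tensor maps that
# the Partition constructors above index via their LAYER_SCOPES and TENSORS lists.
def _example_build_partition(model):
    layers = layerDict(model, depth=10000, basic_blocks=())
    tensors = tensorDict(model)
    return Partition3(layers, tensors, device='cpu')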
|
def move_tensors(ts, device):
def move(t):
if isinstance(t, (nn.Module, Tensor)):
return t.to(device)
return t
return nested_map(move, ts)
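# Sketch: move_tensors recursively applies .to(device) to every Tensor or Module in a nested
# structure while leaving other values untouched. The toy batch below is illustrative.
def _example_move_tensors():
    batch = {'input_ids': torch.zeros(2, 3), 'extras': [torch.ones(2), 'left as-is']}
    return move_tensors(batch, torch.device('cpu'))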
|
def nested_map(func, ts, full=False):
if isinstance(ts, torch.Size):
return func(ts)
elif isinstance(ts, (list, tuple, set)):
return type(ts)((nested_map(func, t, full=full) for t in ts))
elif isinstance(ts, dict):
return {k: nested_map(func, v, full=full) for (k, v) in ts.items()}
elif (isinstance(ts, slice) and full):
start = nested_map(func, ts.start, full=full)
stop = nested_map(func, ts.stop, full=full)
step = nested_map(func, ts.step, full=full)
return slice(start, stop, step)
return func(ts)
|
def flatten(ts):
if isinstance(ts, torch.Size):
(yield ts)
elif isinstance(ts, (list, tuple, set)):
(yield from chain(*[flatten(t) for t in ts]))
elif isinstance(ts, dict):
(yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))]))
else:
(yield ts)
|
def unflatten(xs, structure):
return _unflatten(xs, structure)[0]
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
if (not isinstance(structure, (list, tuple, set, dict))):
return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
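# Sketch: the flatten/unflatten pair used by the partition forward methods. A structure such as
# input_structure = [1, 1, 1] simply describes "three leaf values"; for unflatten only the
# nesting matters, and the placeholder leaves are replaced by the given values in order.
def _example_flatten_unflatten():
    structure = {'a': [torch.Size([2, 3]), 0], 'b': (1, 2)}
    leaves = list(flatten(structure))       # [torch.Size([2, 3]), 0, 1, 2] (dict keys sorted)
    rebuilt = unflatten(leaves, structure)  # same nesting with the leaves filled back in
    assert rebuilt == structure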
|
def state_dict(partition, *args, **kwargs):
state = nn.Module.state_dict(partition, *args, **kwargs)
lookup = partition.lookup
result = dict()
for (k, v) in state.items():
if (k in lookup):
result[lookup[k]] = v
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
result[new_k] = v
return result
|
def load_state_dict(partition, state_dict, strict=True):
reverse_lookup = {v: k for (k, v) in partition.lookup.items()}
device = partition.device
keys = list(partition.state_dict(None).keys())
new_state = dict()
for k in keys:
if (k in reverse_lookup):
new_state[reverse_lookup[k]] = state_dict[k].to(device)
continue
idx = k.rfind('.')
to_replace = k[:idx]
if (to_replace in reverse_lookup):
key = (reverse_lookup[to_replace] + k[idx:])
new_state[key] = state_dict[k].to(device)
nn.Module.load_state_dict(partition, new_state, strict=strict)
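# Sketch: state_dict/load_state_dict above translate between the generated l_*/p_* attribute
# names and the original parameter names recorded in each partition's lookup table, so a
# partition checkpoint is keyed like the full model. `partition` and `full_state_dict`
# (a dict keyed by those original names) are illustrative.
def _example_checkpoint_roundtrip(partition, full_state_dict):
    saved = partition.state_dict()              # keys such as 'bert.encoder.9.attention.self.key.weight'
    partition.load_state_dict(full_state_dict)  # consumes only the keys this partition owns
    return saved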
|
def named_buffers(partition, prefix='', recurse=True):
params = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def named_parameters(partition, prefix='', recurse=True):
params = nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def cpu(partition):
partition.device = torch.device('cpu')
return nn.Module.cpu(partition)
|
def cuda(partition, device=None):
if (device is None):
device = torch.cuda.current_device()
partition.device = torch.device(device)
return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args[0]):
device = args[0].device
if (not (device is None)):
partition.device = torch.device(device)
return nn.Module.to(partition, *args, **kwargs)
|
def bert_large_uncased_whole_word_maskings_384_8p_bw12_pipedream():
return dict(model_type='bert_squad', model_name_or_path='bert-large-uncased-whole-word-masking', do_lower_case=True, output_past=False, stateless_tied=False, explicitly_set_dict={'precompute_attention_mask': True, 'return_dict': False}, do_resize_token_embedding=False)
|
def create_pipeline_configuration(DEBUG=False, batch_size=32):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (ParameterList, Linear, BatchNorm1d), 'model_inputs': {'input0': {'shape': torch.Size([32, 1225]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1]}}, 'model_outputs': {'Net/Linear[output_layer]': {'shape': torch.Size([32, 1]), 'dtype': torch.float32, 'is_batched': True, 'created_by': 4}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'input0': {'shape': torch.Size([32, 1225]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'Net/SplitLinear[input_layer]/torch.nn.functional::linear_11': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 4}, 1: {'stage_cls': Partition1, 'inputs': {'input0': {'shape': torch.Size([32, 1225]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'Net/SplitLinear[input_layer]/torch.nn.functional::linear_11': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'Net/SplitLinear[input_layer]/prim::ListConstruct_13_0': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_1': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_2': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_3': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 3}, 2: {'stage_cls': Partition2, 'inputs': {'Net/SplitLinear[input_layer]/prim::ListConstruct_13_0': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_1': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_2': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'Net/SplitLinear[input_layer]/prim::ListConstruct_13_3': {'shape': torch.Size([32, 459]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'Net/torch.nn.functional::leaky_relu_19': {'shape': torch.Size([32, 1836]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 2}, 3: {'stage_cls': Partition3, 'inputs': {'Net/torch.nn.functional::leaky_relu_19': {'shape': torch.Size([32, 1836]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'Net/BatchNorm1d[bn2]': {'shape': torch.Size([32, 20000]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 1}, 4: {'stage_cls': Partition4, 'inputs': {'Net/BatchNorm1d[bn2]': {'shape': torch.Size([32, 20000]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'Net/Linear[output_layer]': {'shape': torch.Size([32, 1]), 'dtype': torch.float32, 'req_grad': True, 
'is_batched': True, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 0}}}
batch_dim = config['batch_dim']
for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
for s in config['stages'].values():
for d in chain(s['inputs'].values(), s['outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
return config
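# Hedged sketch of the shape rewrite done by the loops above (names are illustrative):
# every 'is_batched' shape in the config was traced with batch size 32, so the size
# along batch_dim is replaced with the requested batch_size while the remaining
# dimensions are kept intact.
_traced = torch.Size([32, 1225])
_rebatched = torch.Size(_traced[:0] + (64,) + _traced[1:])
assert _rebatched == torch.Size([64, 1225])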
|
class Partition0(nn.Module):
LAYER_SCOPES = []
TENSORS = ['Net/SplitLinear[input_layer]/ParameterList[weights]/Parameter[2]', 'Net/SplitLinear[input_layer]/ParameterList[biases]/Parameter[2]']
def __init__(self, layers, tensors, device='cuda:0'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1]
self.lookup = {'p_0': 'input_layer.weights.2', 'p_1': 'input_layer.biases.2'}
self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = torch.nn.functional.linear(x0, self.p_0, bias=self.p_1)
return (t_0,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
LAYER_SCOPES = []
TENSORS = ['Net/SplitLinear[input_layer]/ParameterList[weights]/Parameter[0]', 'Net/SplitLinear[input_layer]/ParameterList[weights]/Parameter[1]', 'Net/SplitLinear[input_layer]/ParameterList[weights]/Parameter[3]', 'Net/SplitLinear[input_layer]/ParameterList[biases]/Parameter[0]', 'Net/SplitLinear[input_layer]/ParameterList[biases]/Parameter[1]', 'Net/SplitLinear[input_layer]/ParameterList[biases]/Parameter[3]']
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1]
self.lookup = {'p_0': 'input_layer.weights.0', 'p_1': 'input_layer.weights.1', 'p_2': 'input_layer.weights.3', 'p_3': 'input_layer.biases.0', 'p_4': 'input_layer.biases.1', 'p_5': 'input_layer.biases.3'}
self.to(self.device)
def forward(self, *args):
(x0, x1) = unflatten(args, self.input_structure)
t_0 = torch.nn.functional.linear(x0, self.p_0, bias=self.p_3)
t_1 = torch.nn.functional.linear(x0, self.p_1, bias=self.p_4)
t_2 = torch.nn.functional.linear(x0, self.p_2, bias=self.p_5)
t_2 = [t_0, t_1, x1, t_2]
return list(flatten(t_2))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition2(nn.Module):
LAYER_SCOPES = ['Net/BatchNorm1d[bn1]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [[1, 1, 1, 1]]
self.lookup = {'l_0': 'bn1'}
self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = torch.cat(x0, dim=(- 1))
t_0 = self.l_0(t_0)
t_0 = torch.nn.functional.leaky_relu(t_0, negative_slope=0.01, inplace=False)
return (t_0,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition3(nn.Module):
LAYER_SCOPES = ['Net/Linear[h1_layer]', 'Net/BatchNorm1d[bn2]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1]
self.lookup = {'l_0': 'h1_layer', 'l_1': 'bn2'}
self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = self.l_0(x0)
t_0 = self.l_1(t_0)
return (t_0,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition4(nn.Module):
LAYER_SCOPES = ['Net/Linear[h2_layer]', 'Net/BatchNorm1d[bn3]', 'Net/Linear[output_layer]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:4'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1]
self.lookup = {'l_0': 'h2_layer', 'l_1': 'bn3', 'l_2': 'output_layer'}
self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = torch.nn.functional.leaky_relu(x0, negative_slope=0.01, inplace=False)
t_0 = self.l_0(t_0)
t_0 = self.l_1(t_0)
t_0 = torch.nn.functional.leaky_relu(t_0, negative_slope=0.01, inplace=False)
t_0 = self.l_2(t_0)
return (t_0,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
return load_state_dict(self, state)
def named_parameters(self, recurse=True):
return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
return named_buffers(self, recurse=recurse)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
"""Iterate over model layers, yielding (layer, layer_scope, encasing_module) tuples.

depth limits how far down the module tree to descend; basic_blocks lists module types
that are never broken down further; when full is True, non-terminal modules are also
yielded, with a trailing flag marking whether the yielded module is terminal."""
if (prefix is None):
prefix = type(module).__name__
for (name, sub_module) in module.named_children():
scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')
if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)):
if full:
(yield (sub_module, scope, module, True))
else:
(yield (sub_module, scope, module))
else:
if full:
(yield (sub_module, scope, module, False))
(yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
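# Hedged usage sketch (the example module is illustrative, not the partitioned model):
# traverse_model walks named_children recursively and yields each terminal layer
# together with its scope string and its parent module.
_example_net = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
_scopes = [scope for (_, scope, _) in traverse_model(_example_net, depth=1000)]
assert _scopes == ['Sequential/Linear[0]', 'Sequential/ReLU[1]']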
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.Tensor, str)]]:
"\n iterate over model's buffers and parameters yielding obj,obj_scope\n\n Parameters:\n -----------\n model:\n the model to iterate over\n "
if (prefix is None):
prefix = type(module).__name__
for (param_name, param) in module.named_parameters(recurse=False):
param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
(yield (param, param_scope))
for (buffer_name, buffer) in module.named_buffers(recurse=False):
buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
(yield (buffer, buffer_scope))
for (name, sub_module) in module.named_children():
(yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
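# Hedged usage sketch (illustrative module): parameters and buffers are yielded with
# scope strings that mirror traverse_model's naming convention.
_pb_scopes = [scope for (_, scope) in traverse_params_buffs(nn.Linear(4, 4))]
assert _pb_scopes == ['Linear/Parameter[weight]', 'Linear/Parameter[bias]']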
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
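# Hedged sketch of how these helpers feed the generated Partition classes above
# (the wiring below is illustrative; the actual pipeline driver is not shown here):
#   layers = layerDict(model, depth=config['depth'], basic_blocks=config['basic_blocks'])
#   tensors = tensorDict(model)
#   stage0_cfg = config['stages'][0]
#   stage0 = stage0_cfg['stage_cls'](layers, tensors, device=stage0_cfg['devices'][0])
# Each Partition then picks out its own LAYER_SCOPES / TENSORS entries from these dicts.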
|
def move_tensors(ts, device):
def move(t):
if isinstance(t, (nn.Module, Tensor)):
return t.to(device)
return t
return nested_map(move, ts)
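# Hedged usage sketch (illustrative values): move_tensors relies on nested_map, so
# arbitrarily nested containers of tensors/modules are moved to the target device while
# non-tensor leaves pass through unchanged.
_moved = move_tensors({'x': torch.ones(2), 'meta': 'keep-me'}, 'cpu')
assert _moved['meta'] == 'keep-me' and _moved['x'].device.type == 'cpu'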
|
def nested_map(func, ts, full=False):
if isinstance(ts, torch.Size):
return func(ts)
elif isinstance(ts, (list, tuple, set)):
return type(ts)((nested_map(func, t, full=full) for t in ts))
elif isinstance(ts, dict):
return {k: nested_map(func, v, full=full) for (k, v) in ts.items()}
elif (isinstance(ts, slice) and full):
start = nested_map(func, ts.start, full=full)
stop = nested_map(func, ts.stop, full=full)
step = nested_map(func, ts.step, full=full)
return slice(start, stop, step)
return func(ts)
|
def flatten(ts):
if isinstance(ts, torch.Size):
(yield ts)
elif isinstance(ts, (list, tuple, set)):
(yield from chain(*[flatten(t) for t in ts]))
elif isinstance(ts, dict):
(yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))]))
else:
(yield ts)
|
def unflatten(xs, structure):
return _unflatten(xs, structure)[0]
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
if (not isinstance(structure, (list, tuple, set, dict))):
return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
|
def state_dict(partition, *args, **kwargs):
state = nn.Module.state_dict(partition, *args, **kwargs)
lookup = partition.lookup
result = dict()
for (k, v) in state.items():
if (k in lookup):
result[lookup[k]] = v
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
result[new_k] = v
return result
|
def load_state_dict(partition, state):
reverse_lookup = {v: k for (k, v) in partition.lookup.items()}
device = partition.device
keys = list(partition.state_dict(None).keys())
new_state = dict()
for k in keys:
if (k in reverse_lookup):
new_state[reverse_lookup[k]] = state[k].to(device)
continue
idx = k.rfind('.')
to_replace = k[:idx]
if (to_replace in reverse_lookup):
key = (reverse_lookup[to_replace] + k[idx:])
new_state[key] = state[k].to(device)
nn.Module.load_state_dict(partition, new_state, strict=True)
|
def named_buffers(partition, recurse=True):
params = nn.Module.named_buffers(partition, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def named_parameters(partition, recurse=True):
params = nn.Module.named_parameters(partition, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def cpu(partition):
partition.device = torch.device('cpu')
return nn.Module.cpu(partition)
|
def cuda(partition, device=None):
if (device is None):
device = torch.cuda.current_device()
partition.device = torch.device(device)
return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args[0]):
device = args[0].device
if (not (device is None)):
partition.device = torch.device(device)
return nn.Module.to(partition, *args, **kwargs)
|
def create_pipeline_configuration(DEBUG=False, batch_size=8):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (StatelessEmbedding, T5LayerNorm, Dropout, T5Block, Linear), 'model_inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___64': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___66': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_8': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___64': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___66': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___109': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___111': {'shape': torch.Size([8, 32, 
320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___109': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___111': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___154': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___156': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___154': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___156': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___244': {'shape': torch.Size([8, 
320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___246': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___244': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___246': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___289': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___291': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___289': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___291': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_7': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___334': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___336': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_7': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___334': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___336': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___379': {'shape': 
torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___379': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_8': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_9': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_9': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___486': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___488': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___490': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_9': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_9': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___486': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___488': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___490': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_10': {'shape': 
torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_10': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___525': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___527': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___529': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_10': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_10': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___525': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___527': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___529': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_11': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_11': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___564': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___566': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___568': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 
'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_11': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_11': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___564': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___566': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___568': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_12': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_12': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___603': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___605': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___607': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_12': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_12': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___603': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___605': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___607': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_13': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_13': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___642': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___644': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___646': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_13': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_13': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___642': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___644': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___646': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_14': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_14': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___681': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___683': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___685': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 
'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_14': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_14': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___681': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___683': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___685': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_15': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_15': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___720': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___722': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___724': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_15': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_15': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___720': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___722': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___724': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': 
{'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}
batch_dim = config['batch_dim']
for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
for s in config['stages'].values():
for d in chain(s['inputs'].values(), s['outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
return config
|
class Partition0(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]']
TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]']
def __init__(self, layers, tensors, device='cuda:0'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1]
self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.block.0', 'l_3': 'encoder.block.1', 'l_4': 'encoder.block.2', 'l_5': 'decoder.embed_tokens', 'p_0': 'shared_embed_weight'}
self.to(self.device)
def forward(self, *args):
(attention_mask, decoder_attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure)
t_0 = decoder_input_ids.size()
t_1 = input_ids.size()
t_1 = t_1[(- 1)]
t_1 = input_ids.view((- 1), t_1)
t_1 = self.l_0(self.p_0, t_1)
t_1 = self.l_1(t_1)
t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))]
t_2 = t_2.to(dtype=torch.float32)
t_2 = (1.0 - t_2)
t_2 = (t_2 * (- 10000.0))
t_1 = self.l_2(t_1, attention_mask=t_2, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_1 = self.l_3(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_1 = self.l_4(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_4 = t_0[(- 1)]
t_4 = decoder_input_ids.view((- 1), t_4)
t_4 = self.l_5(self.p_0, t_4)
t_5 = t_0[0]
t_0 = t_0[1]
t_6 = torch.arange(t_0, device=self.device)
t_7 = t_6[(None, None, slice(None, None, None))]
t_0 = t_7.repeat(t_5, t_0, 1)
t_6 = t_6[(None, slice(None, None, None), None)]
t_6 = (t_0 <= t_6)
t_0 = decoder_attention_mask.dtype
t_0 = t_6.to(t_0)
t_0 = t_0[(slice(None, None, None), None, slice(None, None, None), slice(None, None, None))]
t_6 = decoder_attention_mask[(slice(None, None, None), None, None, slice(None, None, None))]
t_6 = (t_0 * t_6)
t_6 = t_6.to(dtype=torch.float32)
t_6 = (1.0 - t_6)
t_6 = (t_6 * (- 10000.0))
return list(flatten((t_2, t_3, t_1, t_4, t_6)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.3', 'l_1': 'encoder.block.4', 'l_2': 'encoder.block.5'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition2(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.6', 'l_1': 'encoder.block.7', 'l_2': 'encoder.block.8'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition3(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.9', 'l_1': 'encoder.block.10', 'l_2': 'encoder.block.11'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition4(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:4'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.12', 'l_1': 'encoder.block.13', 'l_2': 'encoder.block.14'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition5(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:5'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.15', 'l_1': 'encoder.block.16', 'l_2': 'encoder.block.17'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition6(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:6'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.18', 'l_1': 'encoder.block.19', 'l_2': 'encoder.block.20'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition7(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:7'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.21', 'l_1': 'encoder.block.22', 'l_2': 'encoder.block.23'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return (t_1,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition8(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[2]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:8'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1]
self.lookup = {'l_0': 'encoder.final_layer_norm', 'l_1': 'encoder.dropout', 'l_2': 'decoder.dropout', 'l_3': 'decoder.block.0', 'l_4': 'decoder.block.1', 'l_5': 'decoder.block.2'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_0 = self.l_1(t_0)
t_1 = self.l_2(x1)
t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))]
t_2 = t_2.to(dtype=torch.float32)
t_2 = (1.0 - t_2)
t_2 = (t_2 * (- 1000000000.0))
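# (the three lines above turn the 0/1 encoder attention mask into an additive bias: kept positions
# become 0.0 and masked positions a large negative value that is added to the attention logits)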
t_1 = self.l_3(t_1, attention_mask=x2, position_bias=None, encoder_attention_mask=t_2, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_0)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_4 = t_1[2]
t_1 = t_1[3]
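# (decoder block outputs are unpacked the same way, except element 3 is additionally kept and
# passed on to the next block as encoder_decoder_position_bias for the cross-attention)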
t_1 = self.l_4(t_3, attention_mask=x2, position_bias=t_4, encoder_attention_mask=t_2, encoder_decoder_position_bias=t_1, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_0)
t_4 = t_1[slice(None, 2, None)]
t_4 = t_4[0]
t_3 = t_1[2]
t_1 = t_1[3]
t_1 = self.l_5(t_4, attention_mask=x2, position_bias=t_3, encoder_attention_mask=t_2, encoder_decoder_position_bias=t_1, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=t_0)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_4 = t_1[2]
t_1 = t_1[3]
return list(flatten((t_0, x2, t_2, t_3, t_4, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition9(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:9'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.3', 'l_1': 'decoder.block.4', 'l_2': 'decoder.block.5'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition10(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[7]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[8]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:10'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.6', 'l_1': 'decoder.block.7', 'l_2': 'decoder.block.8'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition11(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[9]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[10]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[11]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:11'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.9', 'l_1': 'decoder.block.10', 'l_2': 'decoder.block.11'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition12(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:12'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.12', 'l_1': 'decoder.block.13', 'l_2': 'decoder.block.14'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition13(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[15]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[17]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:13'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.15', 'l_1': 'decoder.block.16', 'l_2': 'decoder.block.17'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition14(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[20]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:14'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.18', 'l_1': 'decoder.block.19', 'l_2': 'decoder.block.20'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition15(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[22]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[23]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]', 'T5ForConditionalGeneration/Linear[lm_head]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:15'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1, 1, 1, 1, 1]
self.lookup = {'l_0': 'decoder.block.21', 'l_1': 'decoder.block.22', 'l_2': 'decoder.block.23', 'l_3': 'decoder.final_layer_norm', 'l_4': 'decoder.dropout', 'l_5': 'lm_head'}
self.to(self.device)
def forward(self, *args):
(labels, x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_2 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_2 = t_0[slice(None, 2, None)]
t_2 = t_2[0]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_1 = self.l_3(t_1)
t_2 = t_0[2]
t_0 = t_0[3]
t_1 = self.l_4(t_1)
t_1 = (t_1 * 0.03125)
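# (0.03125 == 1024 ** -0.5, i.e. the d_model ** -0.5 rescaling T5 applies before a tied lm_head)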
t_1 = self.l_5(t_1)
t_3 = t_1.size((- 1))
t_3 = t_1.view((- 1), t_3)
t_1 = labels.view((- 1))
t_1 = torch.nn.functional.cross_entropy(t_3, t_1, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='mean')
return (t_1,)
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
def traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module], ...]=(), full: bool=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool])]]:
"""Iterate over model layers, yielding (layer, layer_scope, encasing_module).
Parameters:
-----------
module: the model to iterate over
depth: how far down the module tree to descend
basic_blocks: module types that, if encountered, will not be broken down further
full: whether to yield only the layers selected by depth/basic_blocks or to yield all layers
"""
if (prefix is None):
prefix = type(module).__name__
for (name, sub_module) in module.named_children():
scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')
if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)):
if full:
(yield (sub_module, scope, module, True))
else:
(yield (sub_module, scope, module))
else:
if full:
(yield (sub_module, scope, module, False))
(yield from traverse_model(sub_module, (depth - 1), scope, basic_blocks, full))
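# Illustrative usage sketch (not part of the generated file): traverse_model walks the module tree and
# yields (layer, scope, parent) triples, descending at most `depth` levels and never descending into a
# class listed in basic_blocks. Assumes torch.nn is imported as `nn`, as elsewhere in this module.
_toy = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
_toy_scopes = [scope for (_, scope, _) in traverse_model(_toy, 1000)]
# -> ['Sequential/Linear[0]', 'Sequential/Sequential[1]/ReLU[0]', 'Sequential/Sequential[1]/Linear[1]']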
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
return {s: l for (l, s, _) in traverse_model(model, depth, basic_blocks=basic_blocks)}
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.Tensor, str)]]:
"""Iterate over a model's buffers and parameters, yielding (obj, obj_scope).
Parameters:
-----------
module: the model to iterate over
"""
if (prefix is None):
prefix = type(module).__name__
for (param_name, param) in module.named_parameters(recurse=False):
param_scope = f'{prefix}/{type(param).__name__}[{param_name}]'
(yield (param, param_scope))
for (buffer_name, buffer) in module.named_buffers(recurse=False):
buffer_scope = f'{prefix}/{type(buffer).__name__}[{buffer_name}]'
(yield (buffer, buffer_scope))
for (name, sub_module) in module.named_children():
(yield from traverse_params_buffs(sub_module, (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')))
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
return collections.OrderedDict(((s, t) for (t, s) in traverse_params_buffs(model)))
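# Illustrative sketch (not part of the generated file): layerDict and tensorDict build the scope-keyed
# mappings that the PartitionN constructors expect as their `layers` and `tensors` arguments. Assumes
# torch.nn is imported as `nn` and collections is imported, as elsewhere in this module.
_toy_tensors = tensorDict(nn.Linear(2, 2))
# -> keys 'Linear/Parameter[weight]' and 'Linear/Parameter[bias]', matching the TENSORS scope format
# Hypothetical wiring for the real pipeline (requires the t5-3b checkpoint and the transformers
# classes referenced in create_pipeline_configuration; shown here only as a sketch):
# layers = layerDict(model, depth=10000, basic_blocks=(StatelessEmbedding, Linear, Dropout, T5Block, T5LayerNorm))
# tensors = tensorDict(model)
# stage0 = Partition0(layers, tensors, device='cuda:0')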
|
def move_tensors(ts, device):
def move(t):
if isinstance(t, (nn.Module, Tensor)):
return t.to(device)
return t
return nested_map(move, ts)
|
def nested_map(func, ts, full=False):
if isinstance(ts, torch.Size):
return func(ts)
elif isinstance(ts, (list, tuple, set)):
return type(ts)((nested_map(func, t, full=full) for t in ts))
elif isinstance(ts, dict):
return {k: nested_map(func, v, full=full) for (k, v) in ts.items()}
elif (isinstance(ts, slice) and full):
start = nested_map(func, ts.start, full=full)
stop = nested_map(func, ts.stop, full=full)
step = nested_map(func, ts.step, full=full)
return slice(start, stop, step)
return func(ts)
|
def flatten(ts):
if isinstance(ts, torch.Size):
(yield ts)
elif isinstance(ts, (list, tuple, set)):
(yield from chain(*[flatten(t) for t in ts]))
elif isinstance(ts, dict):
(yield from chain(*[flatten(t) for (k, t) in sorted(ts.items(), key=(lambda t: t[0]))]))
else:
(yield ts)
|
def unflatten(xs, structure):
return _unflatten(xs, structure)[0]
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
if (not isinstance(structure, (list, tuple, set, dict))):
return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
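# Round-trip sketch (not part of the generated file): flatten linearizes nested lists/tuples/sets/dicts
# and unflatten rebuilds the original structure from the flat sequence plus a matching template. The
# generated forwards use this with input_structure templates like [1, 1, 1] to unpack positional args.
_nested = (1, [2, 3], {'a': 4})
_flat = list(flatten(_nested))
# _flat == [1, 2, 3, 4]
_rebuilt = unflatten(_flat, _nested)
# _rebuilt == (1, [2, 3], {'a': 4})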
|
def state_dict(partition, *args, **kwargs):
state = nn.Module.state_dict(partition, *args, **kwargs)
lookup = partition.lookup
result = dict()
for (k, v) in state.items():
if (k in lookup):
result[lookup[k]] = v
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
result[new_k] = v
return result
|
def load_state_dict(partition, state_dict, strict=True):
reverse_lookup = {v: k for (k, v) in partition.lookup.items()}
device = partition.device
keys = list(partition.state_dict(None).keys())
new_state = dict()
for k in keys:
if (k in reverse_lookup):
new_state[reverse_lookup[k]] = state_dict[k].to(device)
continue
idx = k.rfind('.')
to_replace = k[:idx]
if (to_replace in reverse_lookup):
key = (reverse_lookup[to_replace] + k[idx:])
new_state[key] = state_dict[k].to(device)
nn.Module.load_state_dict(partition, new_state, strict=strict)
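# (state_dict/load_state_dict above translate between partition-local parameter names such as
# 'l_0.layer.0...' and the original model names such as 'encoder.block.6.layer.0...' via
# partition.lookup, so checkpoints of the unpartitioned model can be saved and loaded per stage)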
|
def named_buffers(partition, prefix='', recurse=True):
params = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def named_parameters(partition, prefix='', recurse=True):
params = nn.Module.named_parameters(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
for (k, v) in params:
if (k in lookup):
(yield (lookup[k], v))
else:
assert ('.' in k)
split_idx = k.find('.')
new_k = (lookup[k[:split_idx]] + k[split_idx:])
(yield (new_k, v))
|
def cpu(partition):
partition.device = torch.device('cpu')
return nn.Module.cpu(partition)
|
def cuda(partition, device=None):
if (device is None):
device = torch.cuda.current_device()
partition.device = torch.device(device)
return nn.Module.cuda(partition, partition.device)
|
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args[0]):
device = args[0].device
if (not (device is None)):
partition.device = torch.device(device)
return nn.Module.to(partition, *args, **kwargs)
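# (the cpu/cuda/to wrappers above also record the target device on partition.device, so each stage
# always knows where its parameters and expected inputs live)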
|
def layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe():
return dict(model_type='t5_stateless', model_name_or_path='t5-3b', do_lower_case=False, output_past=False, stateless_tied=True, explicitly_set_dict={'return_dict': False, 'use_cache': False, 'output_only': True, 'output_attentions': False, 'precompute_masks': False, 'output_hidden_states': False}, do_resize_token_embedding=True)
|
def create_pipeline_configuration(DEBUG=False, batch_size=8):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (StatelessEmbedding, Linear, Dropout, T5Block, T5LayerNorm), 'model_inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 6]}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [6]}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 6]}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [7]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 7}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::size_408': {'shape': torch.Size([2]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'used_by': [6]}, 'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___79': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___81': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 7}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___79': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___81': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___139': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___141': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 6}, 2: {'stage_cls': 
Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___139': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___141': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 5}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___259': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___261': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 4}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___259': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___261': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___304': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___306': {'shape': 
torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 3}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___304': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___306': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___349': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___351': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 2}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::size_408': {'shape': torch.Size([2]), 'dtype': torch.Size, 'req_grad': False, 'is_batched': False, 'created_by': 0}, 'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___349': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___351': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___538': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___540': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___542': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 1}, 7: {'stage_cls': Partition7, 'inputs': {'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___538': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___540': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___542': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 0}}}
batch_dim = config['batch_dim']
for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
for s in config['stages'].values():
for d in chain(s['inputs'].values(), s['outputs'].values()):
if d['is_batched']:
shape = d['shape']
d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
return config
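# Illustrative sketch (not part of the generated file): the returned config is a plain dict, and the
# loops above rewrite the batch dimension of every entry marked is_batched. The call below is shown
# as comments because it assumes the Partition classes and transformers/torch symbols referenced in
# the dict are already defined when it runs:
# _cfg = create_pipeline_configuration(DEBUG=True, batch_size=4)
# _cfg['model_inputs']['input_ids']['shape'] -> torch.Size([4, 320])
# _cfg['stages'][0]['devices'] -> ['cpu']   (DEBUG=True keeps every stage on CPU)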
|
class Partition0(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]']
TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]']
def __init__(self, layers, tensors, device='cuda:0'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.block.0', 'l_3': 'encoder.block.1', 'l_4': 'encoder.block.2', 'l_5': 'encoder.block.3', 'p_0': 'shared_embed_weight'}
self.to(self.device)
def forward(self, *args):
(attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure)
t_0 = decoder_input_ids.size()
t_1 = input_ids.size()
t_1 = t_1[(- 1)]
t_1 = input_ids.view((- 1), t_1)
t_1 = self.l_0(self.p_0, t_1)
t_1 = self.l_1(t_1)
t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))]
t_2 = t_2.to(dtype=torch.float32)
t_2 = (1.0 - t_2)
t_2 = (t_2 * (- 10000.0))
t_1 = self.l_2(t_1, attention_mask=t_2, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_1 = self.l_3(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_1 = self.l_4(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
t_1 = self.l_5(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_3 = t_1[slice(None, 2, None)]
t_3 = t_3[0]
t_1 = t_1[2]
return list(flatten((t_0, self.p_0, t_2, t_3, t_1)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition1(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[5]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.4', 'l_1': 'encoder.block.5', 'l_2': 'encoder.block.6', 'l_3': 'encoder.block.7'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
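# Illustrative sketch only (not part of the generated partitioning): how a single stage could be
# constructed from the partitioner's `layers` / `tensors` dicts and pushed one micro-batch.
# The names `layers`, `tensors`, `attention_mask`, `hidden_states` and `position_bias` are assumed
# to be supplied by the caller; routing outputs between stages is normally handled by the pipeline driver.
def run_partition1_once(layers, tensors, attention_mask, hidden_states, position_bias):
    # build the stage (this also moves its modules to cuda:1 via the overridden to())
    stage = Partition1(layers, tensors, device='cuda:1')
    # the stage expects its flattened inputs on its own device
    flat_inputs = [attention_mask.to(stage.device),
                   hidden_states.to(stage.device),
                   position_bias.to(stage.device)]
    # returns the flattened (mask, hidden states, position bias) list consumed by the next stage
    return stage(*flat_inputs)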
|
class Partition2(nn.Module):
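# Pipeline stage holding encoder blocks 8-11; placed on cuda:2 by default.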
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[10]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[11]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:2'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.8', 'l_1': 'encoder.block.9', 'l_2': 'encoder.block.10', 'l_3': 'encoder.block.11'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
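# Inputs mirror Partition1: attention mask, hidden states and position bias from the preceding stage.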
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition3(nn.Module):
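# Pipeline stage holding encoder blocks 12-15; placed on cuda:3 by default.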
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[15]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:3'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.12', 'l_1': 'encoder.block.13', 'l_2': 'encoder.block.14', 'l_3': 'encoder.block.15'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
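# Inputs and outputs mirror the earlier encoder stages.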
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_3(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|
class Partition4(nn.Module):
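# Pipeline stage holding encoder blocks 16-18; placed on cuda:4 by default.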
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:4'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1, 1]
self.lookup = {'l_0': 'encoder.block.16', 'l_1': 'encoder.block.17', 'l_2': 'encoder.block.18'}
self.to(self.device)
def forward(self, *args):
(x0, x1, x2) = unflatten(args, self.input_structure)
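# Inputs mirror the earlier encoder stages; this stage holds the last three blocks in the group.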
t_0 = self.l_0(x1, attention_mask=x0, position_bias=x2, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_1(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
t_0 = self.l_2(t_1, attention_mask=x0, position_bias=t_0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
t_1 = t_0[slice(None, 2, None)]
t_1 = t_1[0]
t_0 = t_0[2]
return list(flatten((x0, t_1, t_0)))
def state_dict(self, *args, **kwargs):
return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
return named_buffers(self, *args, **kwargs)
def cpu(self):
return cpu(self)
def cuda(self, device=None):
return cuda(self, device=device)
def to(self, *args, **kwargs):
return to(self, *args, **kwargs)
|