code stringlengths 17 6.64M |
|---|
def special_traverse_model(module: nn.Module, depth: int, prefix: Optional[str]=None, basic_blocks: Tuple[Type[nn.Module]]=(), special_blocks: Tuple[Type[nn.Module]]=(), next_special_bb_id=None, full: bool=False, mark=False) -> Iterator[Tuple[(nn.Module, str, nn.Module, Optional[bool], Optional[int])]]:
    """Iterate over model layers yielding (layer, layer_scope, encasing_module[, terminal_flag[, special_bb_id]]).

    Parameters:
    -----------
    module:
        the model to iterate over
    depth:
        how far down in the model tree to go
    basic_blocks:
        module classes that, when encountered, will not be broken down further
    special_blocks:
        module classes that receive a running "special basic block" id when ``mark`` is set
    next_special_bb_id:
        running id used internally across recursive calls; leave as None at the top call
    full:
        whether to yield only layers selected by the depth/basic_blocks options, or all layers
    mark:
        when True (together with ``full``), also yield/tag special-block ids

    NOTE(review): the id advances by 0.01 per special block while the recursive
    call passes ``next_special_bb_id + child_idx`` — the exact numbering scheme
    is not clear from this file; confirm against the consumer of these ids.
    NOTE(review): the cleanup below checks ``hasattr(module, '_next_special_bb_id')``
    (the parent) while the attribute is set on ``sub_module`` — looks asymmetric;
    confirm this is intended.
    """
    if (next_special_bb_id is None):
        next_special_bb_id = 0
    if (prefix is None):
        # the scope root defaults to the class name of the top-level module
        prefix = type(module).__name__
    for (child_idx, (name, sub_module)) in enumerate(module.named_children()):
        # scope encodes the path from the root, e.g. Root/Linear[fc1]
        scope = (((prefix + '/') + type(sub_module).__name__) + f'[{name}]')
        if ((len(list(sub_module.children())) == 0) or isinstance(sub_module, tuple(basic_blocks)) or (depth == 0)):
            # terminal layer: a leaf module, a declared basic block, or the depth limit reached
            if full:
                if mark:
                    if (special_blocks and isinstance(sub_module, tuple(special_blocks))):
                        next_special_bb_id += 0.01
                        (yield (sub_module, scope, module, True, next_special_bb_id))
                    else:
                        (yield (sub_module, scope, module, True, None))
                else:
                    (yield (sub_module, scope, module, True))
            else:
                (yield (sub_module, scope, module))
        elif isinstance(sub_module, tuple(special_blocks)):
            # non-terminal special block: tag it (once) and recurse into it
            if mark:
                if (not hasattr(module, '_next_special_bb_id')):
                    next_special_bb_id += 0.01
                    sub_module._next_special_bb_id = next_special_bb_id
            if full:
                if mark:
                    (yield (sub_module, scope, module, False, next_special_bb_id))
                else:
                    (yield (sub_module, scope, module, False))
            (yield from special_traverse_model(sub_module, (depth - 1), scope, basic_blocks, special_blocks, (next_special_bb_id + child_idx), full, mark=mark))
        else:
            # plain container module: optionally yield it, then recurse
            if full:
                if mark:
                    (yield (sub_module, scope, module, False, None))
                else:
                    (yield (sub_module, scope, module, False))
            (yield from special_traverse_model(sub_module, (depth - 1), scope, basic_blocks, special_blocks, (next_special_bb_id + child_idx), full, mark=mark))
    # drop the temporary marker attribute before returning to the caller
    if (mark and hasattr(module, '_next_special_bb_id')):
        delattr(module, '_next_special_bb_id')
|
def traverse_params_buffs(module: nn.Module, prefix: Optional[str]=None) -> Iterator[Tuple[(torch.tensor, str)]]:
    """Recursively yield (tensor, scope) pairs for every parameter and buffer of the model.

    The scope string encodes the path from the root module,
    e.g. ``Linear/Parameter[weight]``.

    Parameters:
    -----------
    module:
        the model to iterate over
    prefix:
        scope prefix used by the recursion; defaults to the root class name
    """
    root = type(module).__name__ if prefix is None else prefix
    # this module's own parameters
    for param_name, param in module.named_parameters(recurse=False):
        yield param, f'{root}/{type(param).__name__}[{param_name}]'
    # this module's own buffers
    for buffer_name, buffer in module.named_buffers(recurse=False):
        yield buffer, f'{root}/{type(buffer).__name__}[{buffer_name}]'
    # recurse into children with an extended scope prefix
    for child_name, child in module.named_children():
        child_prefix = f'{root}/{type(child).__name__}[{child_name}]'
        yield from traverse_params_buffs(child, child_prefix)
|
def layerDict(model: nn.Module, depth=1000, basic_blocks=()) -> Dict[(str, nn.Module)]:
    """Map each layer's scope name to the layer module itself."""
    mapping = {}
    for layer, scope, _parent in traverse_model(model, depth, basic_blocks=basic_blocks):
        mapping[scope] = layer
    return mapping
|
def tensorDict(model: nn.Module) -> OrderedDict[(str, Tensor)]:
    """Map each parameter/buffer scope name to its tensor, preserving traversal order."""
    result = collections.OrderedDict()
    for tensor, scope in traverse_params_buffs(model):
        result[scope] = tensor
    return result
|
def nested_map(func, ts, full=False):
    """Apply ``func`` to every leaf of an arbitrarily nested structure of
    lists/tuples/sets/dicts, rebuilding the same container types.

    ``torch.Size`` is treated as an atomic leaf (it subclasses tuple, so it is
    checked first).  Slices are only descended into when ``full`` is set.
    """
    if isinstance(ts, torch.Size):
        # must precede the tuple check: Size is a tuple subclass
        return func(ts)
    if isinstance(ts, (list, tuple, set)):
        return type(ts)(nested_map(func, item, full=full) for item in ts)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for key, value in ts.items()}
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
|
def flatten(ts):
    """Yield the leaves of a nested structure in deterministic order.

    Dict values are visited in sorted-key order so the ordering matches
    ``_unflatten``.  ``torch.Size`` is yielded whole, as a single leaf.
    """
    if isinstance(ts, torch.Size):
        # atomic leaf even though Size is a tuple subclass
        yield ts
    elif isinstance(ts, (list, tuple, set)):
        for item in ts:
            yield from flatten(item)
    elif isinstance(ts, dict):
        # sorted by key for a deterministic, reconstructible order
        for _key, value in sorted(ts.items(), key=lambda kv: kv[0]):
            yield from flatten(value)
    else:
        yield ts
|
def unflatten(xs, structure):
    """Rebuild a nested structure from flat leaves ``xs`` (inverse of ``flatten``)."""
    value, _consumed = _unflatten(xs, structure)
    return value
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
if (not isinstance(structure, (list, tuple, set, dict))):
return (xs[0], 1)
if isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
assert isinstance(structure, dict)
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
|
def detach_tensors(ts):
    """Return a copy of the nested structure with every Tensor detached from the
    autograd graph while keeping its original ``requires_grad`` flag."""
    def _detach(t):
        if not isinstance(t, Tensor):
            return t
        # preserve the grad flag on the detached copy
        return t.detach().requires_grad_(t.requires_grad)
    return nested_map(_detach, ts)
|
def move_tensors(ts, device):
    """Move every Tensor / nn.Module leaf of the nested structure to ``device``;
    all other leaves are passed through unchanged."""
    def _to_device(t):
        return t.to(device) if isinstance(t, (nn.Module, Tensor)) else t
    return nested_map(_to_device, ts)
|
def set_grad_mode(ts, require_grad):
    """Detach every tensor in the nested structure and reset its grad flag:
    Parameters always require grad; other tensors only when ``require_grad``
    is set and they are floating point."""
    def _apply(t):
        if not isinstance(t, Tensor):
            return t
        wants_grad = isinstance(t, nn.Parameter) or (require_grad and t.is_floating_point())
        return t.detach().requires_grad_(wants_grad)
    return nested_map(_apply, ts)
|
def get_tensor_dtypes(ts):
    """Map every leaf of the nested structure to its dtype (for Tensors) or to
    its Python type (for everything else)."""
    def _dtype_of(t):
        return t.dtype if isinstance(t, Tensor) else type(t)
    return nested_map(_dtype_of, ts)
|
def get_tensor_shapes(ts):
    """Map every leaf of the nested structure to a shape:
    Tensors -> their shape (scalar tensors reported as Size([1])),
    torch.Size leaves -> Size([len]), anything else -> None."""
    def _shape_of(t):
        if isinstance(t, Tensor):
            # a 0-dim tensor has an empty shape; report Size([1]) instead
            return t.shape if t.shape else torch.Size([1])
        if isinstance(t, torch.Size):
            return torch.Size([len(t)])
        return None
    return nested_map(_shape_of, ts)
|
def get_device(ts) -> torch.device:
    """Return the device of the first Tensor found in the nested structure,
    falling back to cuda (if available) or cpu when no tensor is present."""
    for leaf in flatten(ts):
        if isinstance(leaf, Tensor):
            return leaf.device
    # no tensors anywhere: pick this machine's default device
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
@contextmanager
def force_out_of_place(func):
    """Temporarily clear ``func.inplace`` (when present and a bool) for the
    duration of the with-block, restoring the previous value on exit.

    Bug fix: the restore now runs in a ``finally`` clause, so the flag is reset
    even when the body of the with-block raises (previously an exception left
    ``inplace`` permanently forced to False).
    """
    prev_state = None
    modified = False
    if hasattr(func, 'inplace') and isinstance(func.inplace, bool):
        prev_state = func.inplace
        modified = True
        setattr(func, 'inplace', False)
    try:
        yield
    finally:
        if modified:
            setattr(func, 'inplace', prev_state)
|
def get_call_site(*ignored_files) -> Optional[str]:
    """Walk the call stack and return '<file>, line <n>\\n' for the first frame
    that is outside this package, outside ``ignored_files`` and not a torch
    internal; return None when no such frame exists."""
    skip = set(ignored_files)
    skip.add(__file__)
    here = os.path.dirname(os.path.realpath(__file__))
    for frame_record in inspect.stack():
        info = inspect.getframeinfo(frame_record[0])
        fname = info.filename
        # skip our own package, explicitly ignored files and torch internals
        if fname in skip or fname.startswith(here) or ('torch\\' in fname):
            continue
        return f'{fname}, line {info.lineno}\n'
    return None
|
def convert_none_checks(input_file: str, output_file: str):
    """utility to convert None checks which are unsupported by the tracer to
    a convention we support

    we match patterns like:
    if identifier is None => if is_None(identifier)
    if identifier is not None => if is_not_None(identifier)

    Args:
    ---------------------------------------------------------------
    input_file: str
        path to the python file we wish to convert

    output_file: str
        path to the python output file to which write the result
    """
    # compile the patterns once -- they are loop-invariant
    # NOTE(review): these are textual rewrites; they will also fire inside
    # string literals and comments that happen to contain "is None"
    is_None_pattern = re.compile('([a-zA-Z0-9_\\.\\(\\)\\[\\]\\-\\+\\*\\/]+) is None')
    is_not_None_pattern = re.compile('([a-zA-Z0-9_\\.\\(\\)\\[\\]\\-\\+\\*\\/]+) is not None')
    res = []
    modified = False
    with open(input_file, 'r') as f:
        for (idx, original) in enumerate(f.readlines()):
            line = is_None_pattern.sub('is_None(\\1)', original)
            line = is_not_None_pattern.sub('is_not_None(\\1)', line)
            if (line != original):
                modified = True
                # bug fix: report 1-based line numbers (enumerate is 0-based)
                print(f'-I- changed line {idx + 1}')
                print(f'from {original.lstrip().rstrip()}')
                print(f'to {line.lstrip().rstrip()}')
                print()
            res.append(line)
    if modified:
        # prepend the helper predicates so the rewritten file is self-contained
        lines = ['import operator\n']
        lines.append('\n')
        lines.append('\n')
        lines.append(inspect.getsource(is_None))
        lines.append('\n')
        lines.append('\n')
        lines.append(inspect.getsource(is_not_None))
        lines.append('\n')
        res = (lines + res)
    with open(output_file, 'w') as f:
        f.writelines(res)
|
class Parser(argparse.ArgumentParser, ABC):
    """ArgumentParser for partitioning tasks,
    excluding tasks specific args (i.e model and data)

    Subclasses must implement ``_add_model_args`` / ``_add_data_args`` and may
    override ``_extra``, ``_default_values``, ``_auto_file_name`` and
    ``_post_parse``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # each option family gets its own argument group for readable --help
        model_args = self.add_argument_group('model_args')
        self._add_model_args(model_args)
        data_args = self.add_argument_group('data_args')
        self._add_data_args(data_args)
        partitioning_args = self.add_argument_group('partitioning_args')
        self._add_partitioning_args(partitioning_args)
        heuristics_args = self.add_argument_group('heuristics_args')
        self._add_heurisitcs_args(heuristics_args)
        presets_args = self.add_argument_group('presets')
        self._add_presets_args(presets_args)
        METIS_args = self.add_argument_group('METIS_args')
        self._add_METIS_args(METIS_args)
        acyclic_args = self.add_argument_group('acyclic_args')
        self._add_acyclic_args(acyclic_args)
        binpack_args = self.add_argument_group('binpack_args')
        self._add_binpack_args(binpack_args)
        mpipe_args = self.add_argument_group('mpipe_args')
        self._add_mpipe_args(mpipe_args)
        analysis_args = self.add_argument_group('analysis_args')
        self._add_analysis_args(analysis_args)
        extra_args = self.add_argument_group('extra_args')
        extra_args.add_argument('--debug', action='store_true', default=False)
        self._extra(extra_args)
        self.set_defaults(**self._default_values())

    @abstractmethod
    def _add_model_args(self, group):
        'add cmd args required for building the model that will be partitioned'

    @abstractmethod
    def _add_data_args(self, group):
        'add cmd args required for providing input samples for partitioning'

    def _add_analysis_args(self, group):
        'post-partitioning analysis options'
        analysis_mode = group.add_mutually_exclusive_group()
        analysis_mode.add_argument('--no_analysis', action='store_true', default=False, help='disable partition analysis')
        analysis_mode.add_argument('--analysis_only', action='store_true', default=False, help='run only analysis for partitioned model')
        group.add_argument('--analysis_batch_size', default=32, type=int, help='batch size to use during the post partition analysis')
        group.add_argument('--analysis_as_async_pipeline', default=False, action='store_true', help='Force analysis as async pipeline')

    def _add_heurisitcs_args(self, group):
        'weight/heuristic options used by the partitioning algorithms'
        group.add_argument('--bw', type=float, default=12, help='data transfer rate between gpus in GBps (Gigabytes per second)')
        group.add_argument('--bwd_to_fwd_ratio', type=float, default=1, help='bwd to fwd ratio for heuristics')
        group.add_argument('--auto_infer_node_bwd_to_fwd_ratio', action='store_true', default=False, help='Automatically infer bwd to fwd ratio for nodes (computation). Expected Ratio for edges should be given `by bwd_to_fwd_ratio`')
        group.add_argument('--penalize_non_tensors', action='store_true', default=False, help='penalize edges with non tensor outputs by default no penalties are applied')
        group.add_argument('--weight_mult_factor', type=float, default=10000.0, help='a constant to multiply weights with (useful if weights are really small)')
        group.add_argument('--edge_penalty', type=float, default=10000000.0, help='multiplicative penalty for edges if `penalize_non_tensors` is set')

    def _add_partitioning_args(self, group):
        'core partitioning options (method, sizes, caches, output file)'
        group.add_argument('-b', '--partitioning_batch_size', type=int, default=128)
        group.add_argument('-p', '--n_partitions', type=int, default=4)
        group.add_argument('-o', '--output_file', default='')
        group.add_argument('--disable_autogenerated_name', action='store_true', default=False)
        group.add_argument('--n_iter', type=int, default=10, help='number of iteration used in order to profile the network and run analysis')
        group.add_argument('--no_recomputation', action='store_true', default=False, help='whether to (not) use recomputation for the backward pass')
        group.add_argument('--depth', default=10000, type=int, help='the depth in which we will partition the model')
        group.add_argument('--basic_blocks', nargs='*', default=[])
        group.add_argument('--use_network_profiler', default=False, action='store_true', help='whether to use the old network_profiler instead of the newer graph based profiler')
        group.add_argument('--sanity_check', default=False, action='store_true', help='whether to use do sanity check after partitioning')
        group.add_argument('--disable_op_profiling', default=False, action='store_true', help='weheter to not profile ops when using the GraphProfiler')
        group.add_argument('--partitioning_method', '-m', choices=['acyclic', 'metis', '2dbin', 'mpipe', 'pipedream'], default='acyclic')
        group.add_argument('--generate_explicit_del', action='store_true', default=False, help='whether to generate del statements in partitioned code')
        group.add_argument('--no_activation_propagation', action='store_true', default=False, help='whether to not propgate activations in the pipeline, and having direct sends instead')
        group.add_argument('-a', '--async_pipeline', default=False, action='store_true', help='Do partitioning and analysis for async pipeline')
        group.add_argument('--dont_use_async_meta_alg', default=False, action='store_true', help='Explicitly avoid the async meta alg. (e.g when number of stages is big)')
        group.add_argument('--dot', default=False, action='store_true', help='Save and plot it using graphviz')
        group.add_argument('--save_memory_mode', default=False, action='store_true', help=('Save memory during profiling by storing everything on cpu,' + ' but sending each layer to GPU before the profiling.'))
        group.add_argument('--trace_on_gpu', default=False, action='store_true', help='Used together with save_memory_mode: if true, will trace the model on GPU despite swapping during profiling.')
        group.add_argument('--force_no_recomputation_scopes', nargs='*', default=[])
        group.add_argument('--cp', '--profiles_cache_name', default='', type=str, dest='profiles_cache_name', help='Profile cache to use in case of multiple runs')
        group.add_argument('--overwrite_profiles_cache', action='store_true', default=False, help='overwrite profile cache')
        group.add_argument('--ct', '--trace_cache_name', default='', type=str, dest='trace_cache_name', help='Trace cache to use in case of multiple runs')
        group.add_argument('--overwrite_trace_cache', action='store_true', default=False, help='overwrite trace cache')

    def _add_METIS_args(self, group):
        'options forwarded to the METIS partitioning backend'
        group.add_argument('--metis_attempts', type=int, default=10, help='number of attempts for running the METIS partitioning algorithm')
        group.add_argument('--metis_verbose_on_error', action='store_true', default=False, help='whether to print the cause of the error')
        group.add_argument('--metis_seed', required=False, type=int, help='Random seed for Metis algorithm')
        group.add_argument('--metis_compress', default=False, action='store_true', help='Compress')
        group.add_argument('--metis_niter', type=int, help='Specifies the number of iterations for the refinement algorithms at each stage of the uncoarsening process.Default is 10.')
        group.add_argument('--metis_nseps', type=int, help='Specifies the number of different separators that it will compute at each level of nested dissection.The final separator that is used is the smallest one. Default is 1.')
        group.add_argument('--metis_ncuts', type=int, help='Specifies the number of different partitionings that it will compute. The final partitioning is the one that achieves the best edgecut or communication volume.Default is 1.')
        group.add_argument('--metis_dbglvl', type=int, help='Metis debug level. Refer to the docs for explanation')
        group.add_argument('--metis_objtype', type=int, help='Extra objective type to miminize (0: edgecut, 1: vol, default: edgecut)')
        group.add_argument('--metis_contig', default=False, action='store_true', help='A boolean to create contigous partitions.')

    def _add_binpack_args(self, group):
        'options for the 2d bin-packing partitioner'
        group.add_argument('--n_clusters', default=2, type=int, help='number of clusters in the model')
        group.add_argument('--analyze_n_clusters', action='store_true', default=False, help='analyze number of clusters')
        group.add_argument('--reminder_policy', type=str, choices=list(ReminderPolicy._value2member_map_.keys()), default='last', help=f'Policy for reminder items in cluster, {ReminderPolicy._value2member_map_}')
        group.add_argument('--second_and_on_cluster_policy', type=str, choices=list(SecondAndOnClusterPolicy._value2member_map_.keys()), default='best_fit', help=f'Policy for 2nd and on cluster {SecondAndOnClusterPolicy._value2member_map_}')
        group.add_argument('--THRESHOLD', type=float, default=0, help='values <= threshold will be contagious with closest stage')

    def _add_mpipe_args(self, group):
        'options for the mpipe partitioner'
        group.add_argument('--special_blocks', type=str, nargs='*', default=[])
        group.add_argument('--L', nargs='*', type=int, default=[])

    def _add_acyclic_args(self, group):
        'options for the acyclic partitioner'
        group.add_argument('--epsilon', default=0.1, type=float, help='imbalance factor')
        group.add_argument('--rounds', default=10, type=int, help='number of optimization rounds default is 10')
        group.add_argument('--allocated_seconds', default=20, type=int, help='run time allocated to the partitioning algorithm default is 20 seconds')
        group.add_argument('--multilevel', action='store_true', default=False, help='whether to use multilevel partitioning algorithm')
        group.add_argument('--objective', choices=['edge_cut', 'stage_time'], default='stage_time', help='partitioning optimization objective')
        group.add_argument('--constraint', choices=['time', 'memory'], default='time', help='partitioning constraint')
        group.add_argument('--maximum_constraint_value', required=False, type=float, default=None, help='maximum constraint value a single stage can have,for example for memory constraint this is the maximum number of parameters a stage can have')

    def _add_presets_args(self, group):
        group.add_argument('--preset', choices=['ftpipe', 'pipedream', 'gpipe'], required=False, help='set preset partitioning and analysis arguments')

    def parse_presets(self, args):
        'apply the argument combinations implied by --preset (mutates args in place)'
        if (args.preset == 'ftpipe'):
            args.async_pipeline = True
            args.bwd_to_fwd_ratio = 1
        elif (args.preset == 'pipedream'):
            args.async_pipeline = False
            args.bwd_to_fwd_ratio = 1
            args.analysis_as_async_pipeline = True
        elif (args.preset == 'gpipe'):
            args.auto_infer_node_bwd_to_fwd_ratio = True
            args.async_pipeline = False
            args.bwd_to_fwd_ratio = (- 1)
            args.analysis_as_async_pipeline = False
        elif args.preset:
            raise NotImplementedError()
        return args

    def _extra(self, group):
        'add any extra cmd args which are task specific'

    def _default_values(self):
        ' provide default cmd args values'
        return dict()

    def _post_parse(self, args, argv):
        'handler for parsing unknown cmd args: reject anything unrecognized'
        if argv:
            msg = gettext('unrecognized arguments: %s')
            self.error((msg % ' '.join(argv)))
        return args

    def _auto_file_name(self, args) -> str:
        ' provide a file name if args.output_file was not specified'
        return ''

    def parse_args(self, args=None, namespace=None) -> Namespace:
        'parse, apply presets, and attach the per-backend option dicts to the namespace'
        (args, extra) = super().parse_known_args(args, namespace)
        self.parse_presets(args)
        args.acyclic_opt = self._acyclic_opts_dict_from_parsed_args(args)
        args.METIS_opt = self._metis_opts_dict_from_parsed_args(args)
        args.binpack_opt = self._binpack_opts_dict_from_parsed_args(args)
        args.mpipe_opt = self._mpipe_opts_dict_from_parsed_args(args)
        if ((not args.disable_autogenerated_name) or (not args.output_file)):
            args.output_file = self._auto_file_name(args)
        if args.output_file.endswith('.py'):
            # the output name is used as a module name later; strip the extension
            args.output_file = args.output_file[:(- 3)]
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        args.device = device
        args.force_no_recomputation_scopes_fn = (lambda scope: any(((s in scope) for s in args.force_no_recomputation_scopes)))
        return self._post_parse(args, extra)

    @staticmethod
    def _acyclic_opts_dict_from_parsed_args(args):
        ' build acyclic partitioner options '
        if (args.objective == 'edge_cut'):
            objective = Objective.EDGE_CUT
        else:
            objective = Objective.STAGE_TIME
        if args.multilevel:
            meta_algorithm = META_ALGORITH.MULTI_LEVEL
        else:
            meta_algorithm = META_ALGORITH.SINGLE_LEVEL
        if (args.constraint == 'time'):
            constraint = Constraint.TIME
        else:
            constraint = Constraint.MEMORY
        return {'epsilon': args.epsilon, 'rounds': args.rounds, 'allocated_seconds': args.allocated_seconds, 'meta_algorithm': meta_algorithm, 'objective': objective, 'constraint': constraint, 'maximum_constraint_value': args.maximum_constraint_value}

    @staticmethod
    def _metis_opts_dict_from_parsed_args(args):
        ' build metis options '
        # bug fix: 'nseps' was read via getattr(args, 'nseps', ...) although the
        # option is stored as 'metis_nseps', so the value was silently dropped.
        # NOTE(review): '_dbglvl' is hardcoded to 1 while --metis_dbglvl exists
        # but is unused here -- confirm whether that is intentional.
        METIS_opt = {'verbose_on_error': getattr(args, 'metis_verbose_on_error', False), 'attempts': getattr(args, 'metis_attempts', 1000), 'seed': getattr(args, 'metis_seed', None), 'nseps': getattr(args, 'metis_nseps', None), 'niter': getattr(args, 'metis_niter', None), 'compress': getattr(args, 'metis_compress', None), 'ncuts': getattr(args, 'metis_ncuts', None), 'objtype': getattr(args, 'metis_objtype', None), 'contig': getattr(args, 'metis_contig', None), '_dbglvl': 1}
        return METIS_opt

    def _binpack_opts_dict_from_parsed_args(self, args):
        ' build 2dbin partitioner options '
        d = dict()
        d['n_clusters'] = args.n_clusters
        d['analyze_n_clusters'] = args.analyze_n_clusters
        if hasattr(args, 'second_and_on_cluster_policy'):
            d['second_and_on_cluster_policy'] = args.second_and_on_cluster_policy
        if hasattr(args, 'reminder_policy'):
            d['reminder_policy'] = args.reminder_policy
        d['THRESHOLD'] = args.THRESHOLD
        return d

    def _mpipe_opts_dict_from_parsed_args(self, args):
        ' build mpipe partitioner options '
        d = dict()
        d['depth'] = args.depth
        d['L_list'] = args.L
        return d
|
def download_and_extract(task, data_dir):
    """Download <task>.zip from TASK2PATH[task], unpack it into data_dir,
    then delete the archive."""
    print(('Downloading and extracting %s...' % task))
    archive = ('%s.zip' % task)
    urllib.request.urlretrieve(TASK2PATH[task], archive)
    with zipfile.ZipFile(archive) as zip_ref:
        zip_ref.extractall(data_dir)
    # the archive is only an intermediate artifact
    os.remove(archive)
    print('\tCompleted!')
|
def format_mrpc(data_dir, path_to_data):
    """Build the MRPC train/dev/test TSVs under <data_dir>/MRPC.

    Uses local msr_paraphrase_{train,test}.txt files when ``path_to_data`` is
    given, otherwise downloads them; splits the train file into train/dev
    according to the official dev_ids.tsv list.
    """
    print('Processing MRPC...')
    mrpc_dir = os.path.join(data_dir, 'MRPC')
    if (not os.path.isdir(mrpc_dir)):
        os.mkdir(mrpc_dir)
    if path_to_data:
        # use the pre-extracted local copies
        mrpc_train_file = os.path.join(path_to_data, 'msr_paraphrase_train.txt')
        mrpc_test_file = os.path.join(path_to_data, 'msr_paraphrase_test.txt')
    else:
        print(('Local MRPC data not specified, downloading data from %s' % MRPC_TRAIN))
        mrpc_train_file = os.path.join(mrpc_dir, 'msr_paraphrase_train.txt')
        mrpc_test_file = os.path.join(mrpc_dir, 'msr_paraphrase_test.txt')
        urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
        urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
    assert os.path.isfile(mrpc_train_file), ('Train data not found at %s' % mrpc_train_file)
    assert os.path.isfile(mrpc_test_file), ('Test data not found at %s' % mrpc_test_file)
    # dev_ids.tsv lists the (id1, id2) pairs that belong to the dev split
    urllib.request.urlretrieve(TASK2PATH['MRPC'], os.path.join(mrpc_dir, 'dev_ids.tsv'))
    dev_ids = []
    with open(os.path.join(mrpc_dir, 'dev_ids.tsv'), encoding='utf8') as ids_fh:
        for row in ids_fh:
            dev_ids.append(row.strip().split('\t'))
    # split the raw train file into train.tsv / dev.tsv by id pair
    with open(mrpc_train_file, encoding='utf8') as data_fh, open(os.path.join(mrpc_dir, 'train.tsv'), 'w', encoding='utf8') as train_fh, open(os.path.join(mrpc_dir, 'dev.tsv'), 'w', encoding='utf8') as dev_fh:
        header = data_fh.readline()
        train_fh.write(header)
        dev_fh.write(header)
        for row in data_fh:
            (label, id1, id2, s1, s2) = row.strip().split('\t')
            if ([id1, id2] in dev_ids):
                dev_fh.write(('%s\t%s\t%s\t%s\t%s\n' % (label, id1, id2, s1, s2)))
            else:
                train_fh.write(('%s\t%s\t%s\t%s\t%s\n' % (label, id1, id2, s1, s2)))
    # test.tsv drops the gold label and gains a leading index column
    with open(mrpc_test_file, encoding='utf8') as data_fh, open(os.path.join(mrpc_dir, 'test.tsv'), 'w', encoding='utf8') as test_fh:
        header = data_fh.readline()
        test_fh.write('index\t#1 ID\t#2 ID\t#1 String\t#2 String\n')
        for (idx, row) in enumerate(data_fh):
            (label, id1, id2, s1, s2) = row.strip().split('\t')
            test_fh.write(('%d\t%s\t%s\t%s\t%s\n' % (idx, id1, id2, s1, s2)))
    print('\tCompleted!')
|
def download_diagnostic(data_dir):
    """Download the GLUE diagnostic TSV into <data_dir>/diagnostic/."""
    print('Downloading and extracting diagnostic...')
    target_dir = os.path.join(data_dir, 'diagnostic')
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    data_file = os.path.join(target_dir, 'diagnostic.tsv')
    urllib.request.urlretrieve(TASK2PATH['diagnostic'], data_file)
    print('\tCompleted!')
    return
|
def get_tasks(task_names):
    """Split a comma-separated task string into a validated task list;
    the literal 'all' expands to every known task."""
    names = task_names.split(',')
    if 'all' in names:
        return TASKS
    # validate each requested task against the known set
    for name in names:
        assert (name in TASKS), ('Task %s not found!' % name)
    return names
|
def main(arguments):
    """CLI entry point: download the requested GLUE tasks into --data_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
    parser.add_argument('--tasks', help='tasks to download data for as a comma separated string', type=str, default='all')
    parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt', type=str, default='')
    args = parser.parse_args(arguments)
    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)
    for task in get_tasks(args.tasks):
        # MRPC and diagnostic need special handling; everything else is a plain zip
        if task == 'MRPC':
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == 'diagnostic':
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)
|
def download_file(url, DATA_DIR=''):
    """Stream-download ``url`` into DATA_DIR (named after the URL's last path
    segment) and return the local path; skip the download if the file exists."""
    target = os.path.join(DATA_DIR, url.split('/')[(- 1)])
    if os.path.exists(target):
        print(f'-I- file {target} already exists, skipping download.')
        return target
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(target, 'wb') as out:
            # stream in 8 KiB chunks to avoid holding the whole file in memory
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    out.write(chunk)
    return target
|
def download_wiki2(DATA_DIR=''):
    """Download and extract the raw WikiText-2 dataset into DATA_DIR."""
    URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip'
    # bug fix: DATA_DIR was not forwarded, so the zip landed in the CWD
    # while extraction targeted DATA_DIR
    path_to_zip_file = download_file(URL, DATA_DIR)
    print(f'-I- Downloaded wikitext2 to {path_to_zip_file}. Extracting...')
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(DATA_DIR)
    print('-I- Done')
|
def download_wiki103(DATA_DIR=''):
    """Download and extract the raw WikiText-103 dataset into DATA_DIR."""
    URL = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip'
    # bug fix: DATA_DIR was not forwarded, so the zip landed in the CWD
    # while extraction targeted DATA_DIR
    path_to_zip_file = download_file(URL, DATA_DIR)
    print(f'-I- Downloaded wikitext103 to {path_to_zip_file}. Extracting...')
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(DATA_DIR)
    print('-I- Done')
|
def get_df(L_to_minmax, L_to_num_stages, L_to_best_objective):
    """Build a DataFrame with columns L / stages / objective from three dicts
    keyed by the same L values (in the same order).

    The 'objective' column is taken from ``L_to_best_objective`` and rescaled
    down by 1e4 (the stored values carry that multiplicative factor).
    """
    # all three dicts must describe the same L values, in the same order
    assert list(L_to_num_stages.keys()) == list(L_to_best_objective.keys()) == list(L_to_minmax.keys())
    records = []
    for L in L_to_num_stages:
        records.append(dict(L=L, stages=L_to_num_stages[L], objective=L_to_best_objective[L]))
    df = pd.DataFrame.from_records(records)
    df['objective'] /= 10000.0
    return df
|
def plot_L_to_objective(df):
    """Render a seaborn bar plot of objective vs. L from a get_df() frame."""
    sns.barplot(x='L', y='objective', data=df)
|
def t5_3b():
    """Plot the recorded L-sweep results for T5-3B.

    The dicts below are hard-coded measurement results (minmax objective,
    number of stages and best objective per L value); presumably collected
    from earlier mpipe partitioning runs — source of the numbers is not
    visible here.
    """
    L_to_minmax = {8: 6636137.099132873, 16: 5638619.469868817, 24: 4589449.469869904, 32: 4287169.033868238, 40: 4103992.787624088, 48: 4155925.9036572957, 56: 4201891.442869065, 64: 4098424.4248143705}
    L_to_num_stages = {8: 8, 16: 15, 24: 23, 32: 31, 40: 35, 48: 45, 56: 46, 64: 61}
    L_to_best_objective = {8: 5529952.12069609, 16: 4083569.301969547, 24: 3890990.6952651218, 32: 3749676.0803660783, 40: 3725981.80166127, 48: 3727633.9440387157, 56: 3726344.331668224, 64: 3726255.025884728}
    df = get_df(L_to_minmax, L_to_num_stages, L_to_best_objective)
    sns.barplot(x='L', y='objective', data=df)
    print(df)
    plt.show()
|
def t5_base():
    """Plot the recorded L-sweep results for T5-base.

    Same structure as t5_3b(): hard-coded measurement results per L value;
    presumably collected from earlier mpipe partitioning runs.
    """
    L_to_minmax = {8: 1134675.3105777686, 16: 849120.8780806304, 24: 941463.7233041492, 32: 804757.1768176018, 40: 805675.4889778851, 48: 774942.1858372729, 56: 789406.8785677295, 64: 766349.086868281}
    L_to_num_stages = {8: 8, 16: 15, 24: 23, 32: 27, 40: 33, 48: 45, 56: 48, 64: 60}
    L_to_best_objective = {8: 929510.4466206762, 16: 715666.8662249836, 24: 735406.3966344037, 32: 712613.3374136146, 40: 700121.7308791562, 48: 695731.4327363339, 56: 695933.99863597, 64: 696800.3580791669}
    df = get_df(L_to_minmax, L_to_num_stages, L_to_best_objective)
    sns.barplot(x='L', y='objective', data=df)
    print(df)
    plt.show()
|
def parse_cli() -> Tuple[(Namespace, Dict, PartitioningTask)]:
    """Parse the partitioning CLI: first extract the task name, then delegate
    the remaining args to the task-specific parser.

    Returns (cmd_args, model_args, partitioner_instance).
    """
    task_parser = argparse.ArgumentParser(description='partitioning task parser', add_help=False)
    task_parser.add_argument('partitioning_task', help='partitioning task to perform')
    task, remaining = task_parser.parse_known_args()
    parser_cls, partitioner_cls = get_parser_and_partitioner(task.partitioning_task)
    parser: Parser = parser_cls()
    cmd_args = parser.parse_args(args=remaining)
    cmd_args.partitioning_task = task.partitioning_task
    # pull out the model-construction args from their dedicated argument group
    model_args = dict()
    for group in parser._action_groups:
        if group.title == 'model_args':
            model_args = {action.dest: getattr(cmd_args, action.dest, None) for action in group._group_actions}
            break
    return cmd_args, model_args, partitioner_cls(cmd_args)
|
def main(cmd_args: Namespace, model_args: Dict, partitioner: PartitioningTask, override_dict: Optional[Dict]=None):
    """Run the full partitioning flow: apply overrides, build the model and a
    sample input, profile + partition, then optionally run post-partition
    analysis.

    Returns (analysis_result, output_file).
    """
    # bug fix: override_dict defaults to None but was iterated unconditionally,
    # crashing every call that did not pass an explicit dict
    if override_dict is None:
        override_dict = {}
    for (i, v) in override_dict.items():
        # overrides may only touch partitioning/heuristics values, never model creation
        if (i in model_args):
            raise ValueError(f'''override dict should not modify model creation arguments got {i}
the intended use is for modifying partitioning/hueristics related values''')
        setattr(cmd_args, i, v)
    if getattr(cmd_args, 'sanity_check', False):
        # deterministic seeding so the post-partition sanity check is reproducible
        torch.manual_seed(0)
        torch.cuda.synchronize()
    model = partitioner.get_model(cmd_args).train()
    # normalize the sample into (args, kwargs) form
    sample = partitioner.get_input(cmd_args, analysis=False)
    if isinstance(sample, dict):
        kwargs = sample
        args = tuple()
    elif isinstance(sample, tuple):
        kwargs = dict()
        args = sample
    else:
        kwargs = dict()
        args = (sample,)
    del sample
    (node_weight_function, edge_weight_function) = get_weight_functions(cmd_args, verbose=True)
    profiles_cache_name = cmd_args.profiles_cache_name
    trace_cache_name = cmd_args.trace_cache_name
    overwrite_profiles_cache = cmd_args.overwrite_profiles_cache
    overwrite_trace_cache = cmd_args.overwrite_trace_cache
    partitioner.register_functions()
    if (not cmd_args.analysis_only):
        # drop stale caches when overwriting was requested
        if (profiles_cache_name and os.path.exists(profiles_cache_name) and overwrite_profiles_cache):
            os.remove(profiles_cache_name)
        if (trace_cache_name and os.path.exists(trace_cache_name) and overwrite_trace_cache):
            os.remove(trace_cache_name)
        if (not cmd_args.save_memory_mode):
            with torch.no_grad():
                (model, args, kwargs) = move_tensors((model, args, kwargs), cmd_args.device)
        # resolve block names to actual classes found in the model
        cmd_args.basic_blocks = choose_blocks(model, cmd_args, blocks_arg_name='basic_blocks')
        cmd_args.mpipe_opt['special_blocks'] = choose_blocks(model, cmd_args, blocks_arg_name='special_blocks')
        cmd_args.mpipe_opt['basic_blocks'] = cmd_args.basic_blocks
        graph = pipe_model(model, partitioner.batch_dim, model_args=args, model_kwargs=kwargs, n_iter=cmd_args.n_iter, nparts=cmd_args.n_partitions, depth=cmd_args.depth, basic_blocks=cmd_args.basic_blocks, node_weight_function=node_weight_function, edge_weight_function=edge_weight_function, use_layers_only_graph=True, output_file=cmd_args.output_file, generate_explicit_del=cmd_args.generate_explicit_del, generate_activation_propagation=(not cmd_args.no_activation_propagation), recomputation=(not cmd_args.no_recomputation), partitioning_method=cmd_args.partitioning_method, METIS_opt=cmd_args.METIS_opt, acyclic_opt=cmd_args.acyclic_opt, binpack_opt=cmd_args.binpack_opt, mpipe_opt=cmd_args.mpipe_opt, force_no_recomp_scopes=cmd_args.force_no_recomputation_scopes_fn, save_memory_mode=cmd_args.save_memory_mode, trace_on_gpu=cmd_args.trace_on_gpu, use_graph_profiler=(not cmd_args.use_network_profiler), use_network_profiler=cmd_args.use_network_profiler, profile_ops=(not cmd_args.disable_op_profiling), graph=None, async_pipe=cmd_args.async_pipeline, trace_cache_name=trace_cache_name, profiles_cache_name=profiles_cache_name, dont_use_async_meta_alg=cmd_args.dont_use_async_meta_alg)
        del args, kwargs
        if cmd_args.dot:
            try:
                graph.save_as_pdf(cmd_args.output_file, 'graphs', node_weight_function=node_weight_function, edge_weight_function=edge_weight_function)
            except Exception as e:
                print('Error saving graph as pdf')
                raise e
        # append provenance (cmdline, model args, overrides) to the generated file
        record_cmdline(cmd_args.output_file)
        with open(f'{cmd_args.output_file}.py', 'a') as f:
            f.write('\n')
            f.write(f'model_args = {model_args}')
        if hasattr(partitioner, 'record_transformer_cfg'):
            partitioner.record_transformer_cfg(cmd_args=cmd_args)
        if override_dict:
            with open(f'{cmd_args.output_file}.py', 'a') as f:
                f.write('\n')
                f.write(f'override_dict = {override_dict}')
    else:
        graph = None
    if (not cmd_args.no_analysis):
        # import the freshly generated partition module and analyze it
        if (sys.platform == 'win32'):
            module_path = cmd_args.output_file.replace('\\', '.')
        else:
            module_path = cmd_args.output_file.replace('/', '.')
        generated = importlib.import_module(module_path)
        create_pipeline_configuration = generated.create_pipeline_configuration
        config = create_pipeline_configuration(DEBUG=True)
        layers = layerDict(model, depth=config['depth'], basic_blocks=config['basic_blocks'])
        tensors = tensorDict(model)
        analysis_config = convert_to_analysis_format(config, layers, tensors)
        del layers, tensors
        if getattr(cmd_args, 'sanity_check', False):
            print('-I- running sanity check')
            run_sanity_check(cmd_args, partitioner, analysis_config, device=cmd_args.device, training=True, check_grads=True, ref_model=None, check_init=False)
        sample = partitioner.get_input(cmd_args, analysis=True)
        analysis_kwargs = dict(sample=sample, graph=graph, config=analysis_config, n_iter=cmd_args.n_iter, recomputation=(not cmd_args.no_recomputation), bw_GBps=cmd_args.bw, verbose=True, async_pipeline=(cmd_args.async_pipeline or cmd_args.analysis_as_async_pipeline), sequential_model=model)
        # NOTE(review): the --partitioning_method choices are lowercase
        # ('acyclic', ...), so this comparison is always True -- confirm
        # whether 'acyclic' was intended here.
        if (cmd_args.partitioning_method != 'ACYCLIC'):
            # recover the stage->gpu assignment from the partitioned graph
            gpu_to_stages = defaultdict(set)
            stage_to_gpu = dict()
            for n in graph.non_input_nodes:
                if ((n.gpu_id is None) or (n.type == NodeTypes.CONSTANT)):
                    continue
                gpu_to_stages[n.gpu_id].add(n.stage_id)
                if (n.stage_id in stage_to_gpu):
                    # every stage must live on exactly one gpu
                    assert (stage_to_gpu[n.stage_id] == n.gpu_id), (stage_to_gpu[n.stage_id], n.gpu_id)
                else:
                    assert (n.gpu_id is not None)
                    stage_to_gpu[n.stage_id] = n.gpu_id
            if gpu_to_stages:
                analysis_kwargs['stages_on_same_gpu'] = list(gpu_to_stages.values())
            stage_to_gpu = [stage_to_gpu[i] for i in sorted(stage_to_gpu.keys())]
            print('stage_to_gpu', stage_to_gpu)
        analysis_kwargs = partitioner.update_analysis_kwargs(cmd_args, analysis_config, analysis_kwargs)
        (analysis_result, summary) = run_analysis(**analysis_kwargs)
        with open(f'{cmd_args.output_file}.py', 'a') as f:
            f.write('\n')
            f.write(((('"""analysis summary\n' + summary) + '\n') + '"""'))
    else:
        analysis_result = summary = None
    partitioner.post_partitioning(cmd_args, graph, analysis_result, summary)
    return (analysis_result, cmd_args.output_file)
|
def choose_blocks(model, args, blocks_arg_name='basic_blocks') -> Tuple[torch.nn.Module]:
    """Resolve the class names listed in ``args.<blocks_arg_name>`` to actual classes.

    The candidate pool is every class in the MRO of every sub-module of `model`.
    A missing attribute (None) is normalized to an empty list on `args`.

    Raises:
        ValueError: when a requested name is not among the available classes.
    """
    available: Dict[str, torch.nn.Module] = {}
    for sub_module in model.modules():
        available.update({klass.__name__: klass for klass in type(sub_module).mro()})
    requested = getattr(args, blocks_arg_name)
    if requested is None:
        requested = []
        setattr(args, blocks_arg_name, requested)
    try:
        return tuple(available[name] for name in requested)
    except KeyError:
        raise ValueError(f'invalid {blocks_arg_name} possible blocks are {list(available.keys())}')
|
def record_cmdline(output_file):
    'Add cmdline to generated python output file.'
    quoted = ' '.join(map(shlex.quote, sys.argv[1:]))
    header = (((('"""' + 'AutoGenerated with:\n') + 'python -m autopipe.partition ') + quoted) + '\n') + '"""'
    if sys.platform == 'win32':
        # Raw-string the docstring so backslashes in Windows paths survive.
        header = 'r' + header
    target = output_file + '.py'
    # Prepend: read existing content, rewind, write header + content.
    with open(target, 'r+') as f:
        existing = f.read()
        f.seek(0, 0)
        f.write((header.rstrip('\r\n') + '\n') + existing)
|
def record_transformer_cfg(python_output_file, args, model_type, explicitly_set_dict=dict(), do_resize_token_embedding=False):
    """Append a config-factory function (named after the output file) to the generated python file."""
    t = Template("\n\ndef ${function_name}():\n return dict(model_type='${model_type}',\n model_name_or_path='${model_name_or_path}',\n do_lower_case=${do_lower_case},\n output_past=False,\n stateless_tied=${stateless_tied},\n explicitly_set_dict=${explicitly_set_dict},\n do_resize_token_embedding=${do_resize_token_embedding},\n )\n ")
    basename = os.path.basename(python_output_file)
    if basename.endswith('.py'):
        basename = basename[:-3]
    mapping = dict(
        function_name=basename,
        model_type=model_type,
        model_name_or_path=args.model_name_or_path,
        do_lower_case=getattr(args, 'do_lower_case', False),
        stateless_tied=getattr(args, 'stateless_tied', False),
        explicitly_set_dict=explicitly_set_dict,
        do_resize_token_embedding=do_resize_token_embedding,
    )
    with open(python_output_file, 'a') as f:
        f.write(t.substitute(mapping))
|
def bruteforce_main(main, main_kwargs=None, override_dicts=None, NUM_RUNS=2, TMP='/tmp/partitioning_outputs/', remove_tmp=False):
    """Run `main` repeatedly (NUM_RUNS per override dict), archive every produced
    output file under TMP, and copy the best-scoring one back over the original.

    Parameters:
        main: callable returning (analysis_result, output_file), where
              output_file + '.py' exists on disk after the call.
        main_kwargs: extra kwargs for `main`; 'override_dict' is injected per run.
        override_dicts: hyper-parameter override dicts to sweep ([]/None -> one empty dict).
        NUM_RUNS: runs per override dict; when > 1, per-run exceptions are recorded
                  and the sweep continues (best-effort search).
        TMP: directory where each run's output file is archived.
        remove_tmp: delete TMP when done.

    Raises:
        The last recorded exception when every run failed.
    """
    if main_kwargs is None:
        main_kwargs = dict()
    results = {}
    best = None
    if override_dicts is None:
        override_dicts = []
    if not override_dicts:
        override_dicts = [{}]
    os.makedirs(TMP, exist_ok=True)
    DICT_PREFIX = '_d%d'
    current_dict_prefix = ''
    last_exception = None
    for (i, override_dict) in enumerate(override_dicts):
        if i > 0:
            # BUGFIX: DICT_PREFIX is a %-style pattern; str.format() returned it
            # unexpanded ('_d%d' has no {} placeholders). Use %-interpolation so
            # runs for dict i get the '_d<i>' suffix as intended.
            current_dict_prefix = DICT_PREFIX % i
        for counter in range(NUM_RUNS):
            main_kwargs['override_dict'] = override_dict
            if NUM_RUNS > 1:
                try:
                    out = main(**main_kwargs)
                except Exception as e:  # best-effort sweep: remember and move on
                    last_exception = e
                    continue
            else:
                out = main(**main_kwargs)
            (analysis_result, output_file) = out
            name = output_file
            orig_name = name
            flag = False
            if name in results:
                # De-duplicate the result key for repeated runs of the same output.
                if name.endswith('.py'):
                    name = name[:-3]
                    flag = True
                while ((name + '.py') in results) or flag:
                    flag = False
                    name += f'_{counter}'
            name += current_dict_prefix
            new_path = pathlib.Path(TMP, os.path.basename(name) + '.py')
            copyfile(orig_name + '.py', new_path)
            results[name] = analysis_result
            # Higher analysis_result is better.
            if best is None:
                best = (new_path, analysis_result)
            elif analysis_result > best[1]:
                best = (new_path, analysis_result)
            print(f'best: {best}')
    if best is None:
        print('-I- hyper parameter search failed raising last exception')
        raise last_exception
    copyfile(best[0], orig_name + '.py')
    print(f'-I- copied best to {orig_name}.py')
    if remove_tmp:
        rmtree(TMP)
|
def register_task(task_name, parser_cls: Type[Parser], partitioner_cls: Type[PartitioningTask]):
    """Register a (parser, partitioner) pair under `task_name` in the global REGISTRY.

    Raises:
        ValueError: invalid or duplicate task name.
        TypeError: parser/partitioner is not a subclass of the expected base.
    """
    # Guard clauses, checked in the same order as before.
    if not isinstance(task_name, str):
        raise ValueError(f'task name must be a string got {task_name} of type {type(task_name).__name__}')
    if task_name in REGISTRY:
        raise ValueError(f'task {task_name} is already registered with values {REGISTRY[task_name]}')
    if not issubclass(parser_cls, Parser):
        raise TypeError(f'registered parser must be a subclass of Parser class got {type(parser_cls).__name__}')
    if not issubclass(partitioner_cls, PartitioningTask):
        raise TypeError(f'registered partitioner must be a subclass of Partitioner class got {type(partitioner_cls).__name__}')
    REGISTRY[task_name] = (parser_cls, partitioner_cls)
|
def get_parser_and_partitioner(task_name) -> Tuple[(Type[Parser], Type[PartitioningTask])]:
    """Look up the registered (parser class, partitioner class) for `task_name`.

    Raises:
        ValueError: when the task was never registered.
    """
    try:
        return REGISTRY[task_name]
    except KeyError:
        raise ValueError(f'unknown task {task_name} available tasks {list(REGISTRY.keys())}')
|
def _leading_version_tuple(version: str) -> tuple:
    """Numeric leading components of a version string: '4.10.0.dev1' -> (4, 10, 0)."""
    parts = []
    for piece in version.split('.'):
        digits = ''
        for ch in piece:
            if not ch.isdigit():
                break
            digits += ch
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)

def import_tasks_from_dir(tasks_dir=os.path.dirname(__file__)):
    """Automatically import any Python files in the tasks directory
    in order to automatically register all available tasks.

    Args:
        tasks_dir: task dir to import from
    """
    for file in os.listdir(tasks_dir):
        path = os.path.join(tasks_dir, file)
        # Skip private/hidden entries; accept python files and sub-packages.
        if file.startswith('_') or file.startswith('.'):
            continue
        if not (file.endswith('.py') or os.path.isdir(path)):
            continue
        task_name = (file[:file.find('.py')] if file.endswith('.py') else file)
        if task_name == 'new_t5':
            import transformers
            # BUGFIX: the old string comparison ordered versions lexicographically
            # (e.g. '4.10.0' < '4.4.1' was True); compare numeric tuples instead.
            if _leading_version_tuple(transformers.__version__) < (4, 4, 1):
                continue
        importlib.import_module('.tasks.' + task_name, package='autopipe')
|
def load_and_cache_examples(args, tokenizer):
    """Return the SQuAD training dataset, loading features from a torch cache when possible."""
    input_dir = args.data_dir if args.data_dir else '.'
    model_tag = list(filter(None, args.model_name_or_path.split('/'))).pop()
    cached_features_file = os.path.join(input_dir, 'cached_{}_{}_{}'.format('train', model_tag, str(args.max_seq_length)))
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info('Loading features from cached file %s', cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        return features_and_dataset['dataset']
    logger.info('Creating features from dataset file at %s', input_dir)
    if (not args.data_dir) and (not args.train_file):
        raise NotImplementedError()
    processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
    examples = processor.get_train_examples(args.data_dir, filename=args.train_file)
    (features, dataset) = squad_convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True, return_dataset='pt', threads=args.threads)
    logger.info('Saving features into cached file %s', cached_features_file)
    torch.save({'features': features, 'dataset': dataset, 'examples': examples}, cached_features_file)
    return dataset
|
class ParsePartitioningOptsSquad(Parser):
    """Command-line parser for partitioning QA (SQuAD) transformer models."""

    def _add_model_args(self, group):
        # Model selection / tokenization options.
        group.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name in huggingface/models')
        group.add_argument('--precompute_attention_mask', action='store_true', default=False, help='whether to compute attention mask inside or outside the model')
        group.add_argument('--max_seq_length', default=384, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
        group.add_argument('--max_query_length', default=64, type=int, help='The maximum number of tokens for the question. Questions longer than this will be truncated to this length.')
        group.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')

    def _add_data_args(self, group):
        # SQuAD data location / preprocessing options.
        group.add_argument('--data_dir', default=None, type=str, help=('The input data dir. Should contain the .json files for the task.' + 'If no data dir or train/predict files are specified, will run with tensorflow_datasets.'))
        group.add_argument('--train_file', default=None, type=str, help=('The input training file. If a data dir is specified, will look for the file there' + 'If no data dir or train/predict files are specified, will run with tensorflow_datasets.'))
        group.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
        group.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.')
        group.add_argument('--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.')
        group.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        group.add_argument('--threads', type=int, default=4, help='multiple threads for converting example to features')

    def _default_values(self):
        # Defaults applied when the corresponding flags are not passed.
        return {'partitioning_batch_size': 1, 'n_iter': 1, 'n_partitions': 2, 'bw': 12, 'analysis_batch_size': 1}

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from model, sequence length, #partitions, bandwidth and method."""
        bw_str = str(args.bw).replace('.', '_')
        model_str = str(args.model_name_or_path).replace('-', '_')
        seq_len_str = f's_{args.max_seq_length}'
        model_str += seq_len_str
        output_file = f'{args.output_file}{model_str}_{args.n_partitions}p_bw{bw_str}'
        if args.async_pipeline:
            output_file += '_async'
        m = args.partitioning_method.lower()
        # '2dbin' is reported as 'virtual_stages' in file names.
        tmp = (m if (m != '2dbin') else 'virtual_stages')
        output_file += f'_{tmp}'
        return output_file
|
class BertPartitioner(PartitioningTask):
    """Partitioning task for BERT question answering (SQuAD)."""

    def __init__(self, args) -> None:
        super().__init__(args)
        # Tokenizer is loaded once and reused by get_input().
        self.tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))

    @property
    def batch_dim(self) -> int:
        # Samples are batched along dim 0.
        return 0

    def get_model(self, args) -> torch.nn.Module:
        """Load BertForQuestionAnswering in train mode with partitioning-specific config flags."""
        config = BertConfig.from_pretrained(args.model_name_or_path, cache_dir=(args.cache_dir if args.cache_dir else None))
        setattr(config, 'precompute_attention_mask', args.precompute_attention_mask)
        # return_dict=False: tracing expects tuple outputs, not ModelOutput objects.
        setattr(config, 'return_dict', False)
        model = BertForQuestionAnswering.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None)).train()
        return model

    def get_input(self, args, analysis=False):
        """One SQuAD batch shaped as model kwargs (input_ids, attention_mask, token_type_ids)."""
        return get_inputs_squad(args, self.tokenizer, analysis=analysis)

    def register_functions(self):
        # Keep identity checks untraced; trace math.sqrt so it appears in the graph.
        register_new_explicit_untraced_function(operator.is_, operator)
        register_new_explicit_untraced_function(operator.is_not, operator)
        register_new_traced_function(math.sqrt, math)

    def record_transformer_cfg(self, cmd_args):
        """Append this run's transformers config factory to the generated file."""
        record_transformer_cfg(python_output_file=f'{cmd_args.output_file}.py', args=cmd_args, model_type='bert_squad', explicitly_set_dict={'precompute_attention_mask': cmd_args.precompute_attention_mask, 'return_dict': False}, do_resize_token_embedding=False)
|
def get_inputs_squad(args, tokenizer, analysis=False):
    """Draw one random SQuAD training batch and shape it as model kwargs."""
    batch_size = args.analysis_batch_size if analysis else args.partitioning_batch_size
    dataset = load_and_cache_examples(args, tokenizer)
    loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=batch_size)
    batch = tuple(t.to(args.device) for t in next(iter(loader)))
    input_ids, raw_mask, token_type_ids = batch[0], batch[1], batch[2]
    if args.precompute_attention_mask:
        # Expand the mask outside the model when requested.
        attention_mask = get_extended_attention_mask(raw_mask, input_ids)
    else:
        attention_mask = raw_mask
    return {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
|
class CEPParser(Parser):
    """Command-line parser for partitioning the CEP model."""

    def _add_model_args(self, group):
        # Model size parameters.
        group.add_argument('--N', type=int, default=361)
        group.add_argument('--C', type=int, default=10000)

    def _add_data_args(self, group):
        # Synthetic dataset parameters.
        group.add_argument('--K', type=int, default=18)
        group.add_argument('--samples_num', type=int, default=100000000000.0)

    def _default_values(self):
        return {'n_iter': 1, 'n_partitions': 4, 'bw': 12, 'partitioning_batch_size': 1, 'analysis_batch_size': 1}

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from model size, #partitions, bandwidth and method."""
        bw_tag = str(args.bw).replace('.', '_')
        model_tag = str('cep_net').replace('-', '_') + f'N{args.N}_C{args.C}'
        name = f'{args.output_file}{model_tag}_{args.n_partitions}p_bw{bw_tag}'
        if args.async_pipeline:
            name += '_async'
        method = args.partitioning_method.lower()
        # '2dbin' is reported as 'virtual_stages' in file names.
        name += '_' + (method if method != '2dbin' else 'virtual_stages')
        return name
|
class CEPPartitioningTask(PartitioningTask):
    """Partitioning task for the CEP model (Net over a synthetic Dataset)."""

    def __init__(self, args) -> None:
        super().__init__(args)

    @property
    def batch_dim(self) -> int:
        # Inputs are batched along dim 0.
        return 0

    def register_functions(self):
        # No special traced/untraced functions are needed for this model.
        pass

    def get_model(self, args) -> torch.nn.Module:
        return Net(n=args.N, c=args.C)

    def get_input(self, args, analysis=False):
        """First batch of the synthetic CEP dataset (inputs only, labels dropped)."""
        dataset = Dataset(n=args.N, k=args.K, max_samples_num=args.samples_num)
        batch_size = (args.analysis_batch_size if analysis else args.partitioning_batch_size)
        loader = DataLoader(dataset, batch_size=batch_size)
        return next(iter(loader))[0]
|
class DumT5Partitioner(T5Partitioner):
    """Debug T5 partitioner: a tiny (1 encoder / 1 decoder layer) tied t5-11b variant."""

    def get_model(self, args) -> torch.nn.Module:
        """Build a stateless-tied TiedT5ForConditionalGeneration with a single layer per stack."""
        # Flags required so the traced model returns plain tuples instead of ModelOutput.
        explicitly_set_dict = {'return_dict': False, 'use_cache': False, 'output_attentions': False, 'output_hidden_states': False, 'output_only': True, 'precomputed_masks': args.precompute_masks}
        config_class = T5Config
        config = config_class.from_pretrained('t5-11b')
        # Shrink to one encoder and one decoder layer for fast debugging.
        config.num_layers = 1
        config.num_decoder_layers = 1
        for (k, v) in explicitly_set_dict.items():
            setattr(config, k, v)
        tokenizer = T5Tokenizer.from_pretrained('t5-11b')
        self.tokenizer = tokenizer
        self.config = config
        model = TiedT5ForConditionalGeneration(config)
        # Hard-coded here on purpose: this debug task always resizes and ties weights.
        do_resize_token_embedding = True
        stateless_tied = True
        if do_resize_token_embedding:
            resize_token_embeddings(model, tokenizer)
        if stateless_tied:
            # Unwrap DataParallel-style wrappers before making tied weights stateless.
            model_to_resize = (model.module if hasattr(model, 'module') else model)
            if hasattr(model_to_resize, 'make_stateless_after_loaded_tied_and_resized'):
                model_to_resize.make_stateless_after_loaded_tied_and_resized()
            elif hasattr(model_to_resize, 'make_stateless'):
                model_to_resize.make_stateless()
            else:
                raise ValueError(f'Problem making model stateless. ')
        return model

    def get_input(self, args, analysis=False):
        """Batch from the parent task; decoder inputs are dropped when there is no decoder."""
        batch = super().get_input(args, analysis=analysis)
        if (self.config.num_decoder_layers == 0):
            del batch['decoder_input_ids']
            del batch['decoder_attention_mask']
        return batch
|
class FunctionalModel(torch.nn.Module):
    """Toy MLP expressed purely with torch.nn.functional calls over raw Parameters.

    Used to exercise partitioning of functional-style models. Note that the final
    linear layer deliberately reuses w1 (weight sharing), and F.dropout is active
    here regardless of module mode (it defaults to training=True).
    """

    def __init__(self):
        super(FunctionalModel, self).__init__()
        # Register w1..w5 square weight matrices (same order as explicit assignment).
        for idx in range(1, 6):
            setattr(self, f'w{idx}', torch.nn.Parameter(torch.randn(_MODEL_DIM, _MODEL_DIM)))

    def forward(self, x):
        for weight in (self.w1, self.w2, self.w3, self.w4, self.w5):
            x = F.relu(F.linear(x, weight))
        # Final layer shares w1 with the first layer, then applies dropout.
        return F.dropout(F.linear(x, self.w1))
|
class DumTFunctionalModelPartitioner(T5Partitioner):
    """Debug partitioner serving the toy FunctionalModel with random inputs."""

    def get_model(self, args) -> torch.nn.Module:
        return FunctionalModel()

    def get_input(self, args, analysis=False):
        batch_size = args.analysis_batch_size if analysis else args.partitioning_batch_size
        return torch.randn(batch_size, _MODEL_DIM)
|
def make_just_x(ds):
    """Build a TensorDataset of input columns only: 'label' and None attributes are dropped."""
    columns = defaultdict(list)
    for feature in ds:
        for field_name, value in vars(feature).items():
            if field_name == 'label' or value is None:
                continue
            columns[field_name].append(value)
    print(columns.keys())
    return TensorDataset(*(torch.tensor(col) for col in columns.values()))
|
def get_dataset(args, tokenizer, cache_name='glue_ds.pt'):
    """Return the GLUE training dataset (inputs only), with a naive torch.save cache.

    BUGFIX: a failed cache load used to return None (while printing that a new
    dataset would be created); it now falls through and actually rebuilds the
    dataset. The broken cache file is intentionally not overwritten afterwards
    (see the save guard below), matching the printed message.
    """
    cache_name += args.model_name_or_path
    if os.path.exists(cache_name) and (not args.overwrite_cache):
        print(f'-I- loading dataset from cahce {cache_name}...')
        try:
            return torch.load(cache_name)
        except Exception as _:
            # Unreadable cache: rebuild below instead of returning None.
            print('-I- loading from cache failed. Creating new dataset. will not overwrite_cache.')
    print('-I- creating dataset')
    data_dir = args.data_dir
    task_dir = TASK_NAME_TO_DATA_DIR.get(args.task_name)
    data_dir = os.path.join(data_dir, task_dir)
    glue_args = GlueDataTrainingArguments(task_name=args.task_name, data_dir=data_dir, max_seq_length=args.max_seq_length, overwrite_cache=args.overwrite_cache)
    ds = GlueDataset(glue_args, tokenizer, mode='train')
    ds = make_just_x(ds)
    # Only save when there is no (possibly broken) cache file, or when asked to overwrite.
    if (not os.path.exists(cache_name)) or args.overwrite_cache:
        print('-I- dataset saved')
        torch.save(ds, cache_name)
    print('-I- DONE creating dataset')
    return ds
|
def get_sample(args, tokenizer, analysis=False):
    """Draw one random GLUE training batch and shape it as model kwargs."""
    dataset = get_dataset(args, tokenizer)
    batch_size = args.analysis_batch_size if analysis else args.partitioning_batch_size
    loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=batch_size)
    batch = next(iter(loader))
    if args.precompute_attention_mask:
        # Expand the mask outside the model when requested.
        mask = get_extended_attention_mask(batch[1], batch[0])
    else:
        mask = batch[1]
    sample = {'input_ids': batch[0], 'attention_mask': mask}
    if args.model_type == 'bert':
        # roberta takes no token_type_ids input.
        sample['token_type_ids'] = batch[2]
    return sample
|
class ParsePartitioningOptsGlue(Parser):
    """Command-line parser for partitioning GLUE sequence-classification models."""

    def _add_model_args(self, group):
        # Model selection / tokenization options.
        group.add_argument('--task_name', type=str, default='mnli', help='Glue task')
        group.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_TYPES)))
        group.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name.')
        group.add_argument('--precompute_attention_mask', action='store_true', default=False, help='whether to compute attention mask inside or outside the model')
        group.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
        group.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')

    def _add_data_args(self, group):
        # GLUE data location / caching options.
        group.add_argument('--data_dir', default='/home_local/saareliad/data/glue_data/', type=str, help='The input data dir. Should contain the files for the task.')
        group.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
        group.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')

    def _default_values(self):
        # Defaults applied when the corresponding flags are not passed.
        d = {'partitioning_batch_size': 1, 'n_iter': 1, 'n_partitions': 2, 'bw': 12, 'analysis_batch_size': 1}
        return d

    def _post_parse(self, args, argv):
        # Normalize the model type before dispatching on it.
        args.model_type = args.model_type.lower()
        return super()._post_parse(args, argv)

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from model, sequence length, task, #partitions, bandwidth and method."""
        bw_str = str(args.bw).replace('.', '_')
        model_str = str(args.model_name_or_path).replace('-', '_')
        seq_len_str = f's_{args.max_seq_length}'
        model_str += seq_len_str
        output_file = f'{args.output_file}{model_str}_{args.n_partitions}p_bw{bw_str}'
        if args.async_pipeline:
            output_file += '_async'
        output_file += f'_{args.task_name}'
        output_file += '_glue'
        m = args.partitioning_method.lower()
        # '2dbin' is reported as 'virtual_stages' in file names.
        tmp = (m if (m != '2dbin') else 'virtual_stages')
        output_file += f'_{tmp}'
        return output_file
|
class GluePartitioner(PartitioningTask):
    """Partitioning task for GLUE sequence classification (BERT / RoBERTa)."""

    def __init__(self, args) -> None:
        super().__init__(args)
        # Tokenizer is loaded once and reused by get_input().
        self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))

    @property
    def batch_dim(self) -> int:
        # Inputs are batched along dim 0.
        return 0

    def get_input(self, args, analysis=False):
        """One GLUE batch shaped as model kwargs."""
        return get_sample(args, self.tokenizer, analysis=analysis)

    def get_model(self, args) -> torch.nn.Module:
        """Load the classification model in train mode with the task's label count."""
        config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=(args.cache_dir if args.cache_dir else None))
        setattr(config, 'precompute_attention_mask', args.precompute_attention_mask)
        config.num_labels = glue_tasks_num_labels.get(args.task_name)
        # Dispatch on the (lower-cased) model type.
        model_cls = {'bert': BertForSequenceClassification, 'roberta': RobertaForSequenceClassification}
        model_cls = model_cls[args.model_type]
        model = model_cls.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None)).train()
        return model

    def register_functions(self):
        # Keep identity checks untraced; trace math.sqrt so it appears in the graph.
        register_new_explicit_untraced_function(operator.is_, operator)
        register_new_explicit_untraced_function(operator.is_not, operator)
        register_new_traced_function(math.sqrt, math)
|
class TextDataset(Dataset):
    """Language-modeling dataset: contiguous fixed-size blocks of token ids,
    with a pickle cache keyed by model name, block size and source file name."""

    def __init__(self, tokenizer, args, file_path='train', block_size=512):
        assert os.path.isfile(file_path), file_path
        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(directory, args.model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename)
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            # Fast path: reuse previously tokenized blocks.
            with open(cached_features_file, 'rb') as handle:
                self.examples = pickle.load(handle)
            return
        self.examples = []
        with open(file_path, encoding='utf-8') as f:
            text = f.read()
        token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
        # Chop into non-overlapping blocks; a trailing partial block is dropped.
        for start in range(0, len(token_ids) - block_size + 1, block_size):
            block = token_ids[start:start + block_size]
            self.examples.append(tokenizer.build_inputs_with_special_tokens(block))
        with open(cached_features_file, 'wb') as handle:
            pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        return torch.tensor(self.examples[item])
|
def load_and_cache_examples(args, tokenizer):
    """Build the language-modeling TextDataset from args.train_data_file (cached on disk)."""
    dataset = TextDataset(tokenizer, args, file_path=args.train_data_file, block_size=args.block_size)
    return dataset
|
class ParsePartitioningOptsLM(Parser):
    """Command-line parser for partitioning language models (GPT-2 style)."""

    def _add_model_args(self, group):
        # Model selection / tying / sequence-length options.
        group.add_argument('--model_name_or_path', default='gpt2', type=str, help='The model checkpoint for weights initialization.')
        group.add_argument('--lmhead', default=False, action='store_true', help='Partition a model with LM head')
        group.add_argument('--stateless_tied', default=False, action='store_true', help='Tie weights stateless trick. Note that shared weight may be sent in pipe')
        group.add_argument('--block_size', default=(- 1), type=int, help='Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to the model max input length for single sentence inputs (take into account special tokens).')
        group.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')

    def _add_data_args(self, group):
        # Training-file / caching options.
        group.add_argument('--train_data_file', default=None, type=str, required=True, help='The input training data file (a text file).')
        group.add_argument('--cache_dir', default='', type=str, help='Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)')
        group.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')

    def _default_values(self):
        # LM head scopes are excluded from recomputation by default.
        d = {'partitioning_batch_size': 1, 'n_iter': 10, 'n_partitions': 4, 'bw': 12, 'analysis_batch_size': 1, 'force_no_recomputation_scopes': ['stateless_lm_head', 'lm_head']}
        return d

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from model, tying, head, block size, #partitions, bandwidth and method."""
        bw_str = str(args.bw).replace('.', '_')
        model_str = str(args.model_name_or_path).replace('-', '_')
        tied = ('tied' if args.stateless_tied else 'untied')
        model_str += f'_{tied}'
        if args.lmhead:
            model_str += '_lmhead'
        seq_len_str = f's_{args.block_size}'
        model_str += seq_len_str
        output_file = f'{args.output_file}{model_str}_{args.n_partitions}p_bw{bw_str}'
        if args.async_pipeline:
            output_file += '_async'
        m = args.partitioning_method.lower()
        # '2dbin' is reported as 'virtual_stages' in file names.
        tmp = (m if (m != '2dbin') else 'virtual_stages')
        output_file += f'_{tmp}'
        return output_file
|
class GPT2Partitioner(PartitioningTask):
    """Partitioning task for GPT-2 language models (optionally with a tied LM head)."""

    def __init__(self, args) -> None:
        super().__init__(args)
        self.tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))
        # Clamp block_size to what the tokenizer supports; <=0 means "use the max".
        if args.block_size <= 0:
            args.block_size = self.tokenizer.max_len_single_sentence
        args.block_size = min(args.block_size, self.tokenizer.max_len_single_sentence)
        self.ds = load_and_cache_examples(args, self.tokenizer)

    @property
    def batch_dim(self) -> int:
        # Inputs are batched along dim 0.
        return 0

    def post_partitioning(self, args, graph, analysis_result, summary):
        """For tied models, rewrite 'cuda:<n_partitions>' -> 'cuda:0' in the generated file,
        so the dummy tied-weights partition shares the first GPU."""
        if args.stateless_tied:
            try:
                import subprocess
                subprocess.check_output(['sed', '-s', '-i', f's/cuda:{args.n_partitions}/cuda:0/g', (args.output_file + '.py')])
            except Exception as e:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt and hid the actual error.
                print(f'Failed to replace tied dummy partition device: {e}')

    def update_analysis_kwargs(self, args, config, analysis_kwargs: Dict) -> Dict[str, Any]:
        """Pin the extra tied-LM-head stage to the same GPU as stage 0."""
        # BUGFIX: the default was set(); downstream (and the populated value here)
        # expects a list of stage-id sets.
        stages_on_same_gpu = []
        if args.lmhead and args.stateless_tied and (len(config['stages']) == (args.n_partitions + 1)):
            stages_on_same_gpu = [{0, args.n_partitions}]
        analysis_kwargs['stages_on_same_gpu'] = stages_on_same_gpu
        return analysis_kwargs

    def register_functions(self):
        # Trace math.sqrt; keep identity checks untraced.
        register_new_traced_function(math.sqrt, namespace=math)
        register_new_explicit_untraced_function(operator.is_, namespace=operator)
        register_new_explicit_untraced_function(operator.is_not, namespace=operator)

    def get_model(self, args) -> torch.nn.Module:
        """Load the GPT-2 variant matching the (lmhead, stateless_tied) flags, in train mode."""
        if args.lmhead:
            model_class = StatelessGPT2LMHeadModel if args.stateless_tied else GPT2LMHeadModel
        elif args.stateless_tied:
            model_class = StatelessGPT2Model
        else:
            model_class = GPT2Model
        model_config = GPT2Config.from_pretrained(args.model_name_or_path, cache_dir=(args.cache_dir if args.cache_dir else None))
        model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=model_config, cache_dir=(args.cache_dir if args.cache_dir else None)).train()
        model.resize_token_embeddings(len(self.tokenizer))
        if args.stateless_tied:
            model.make_stateless_after_loaded_tied_and_resized()
        return model

    def get_input(self, args, analysis=False):
        """One random LM batch; with an LM head the inputs double as labels."""
        batch_size = args.analysis_batch_size if analysis else args.partitioning_batch_size
        sampler = RandomSampler(self.ds)
        dl = DataLoader(self.ds, sampler=sampler, batch_size=batch_size)
        batch = next(iter(dl))
        if args.lmhead:
            sample = {'input_ids': batch, 'labels': batch}
        else:
            sample = {'input_ids': batch}
        return sample
|
class MegatronParser(Parser):
    """Command-line parser for partitioning Megatron language models via fairseq."""

    def __init__(self) -> None:
        if (not has_fairseq):
            raise ImportError('\n\nPlease install fairseq_for_pipeline:')
        super().__init__()

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from architecture, #partitions and bandwidth."""
        bw_str = str(args.bw).replace('.', '_')
        model_str = str(args.arch)
        output_file = f'{args.output_file}{model_str}_{args.n_partitions}p_bw{bw_str}'
        if args.async_pipeline:
            output_file += '_async'
        return output_file

    def _add_data_args(self, group):
        group.add_argument('--dict_path', default='../misc/megatron_11b', help="path to the folder containing megatron's dict.txt")

    def _add_model_args(self, group):
        group.add_argument('--arch', choices=['transformer_lm_megatron', 'transformer_lm_megatron_11b'])

    def _post_parse(self, args, argv):
        """Parse the remaining argv with fairseq and merge the result into args.

        NOTE(review): sets distributed env vars for a single local worker —
        presumably required by fairseq initialization; verify before changing.
        """
        env = os.environ
        env['MASTER_ADDR'] = '127.0.0.1'
        env['MASTER_PORT'] = '6767'
        env['WORLD_SIZE'] = '1'
        env['RANK'] = '0'
        tmp = argparse.ArgumentParser()
        fairseq_defaults = dict(cpu=True, distributed_world_size=1, model_parallel_size=1, task='language_modeling', share_decoder_input_output_embed=True, checkpoint_suffix='', distributed_backend='gloo', device_id=0, distributed_init_method=None, arch=args.arch)
        tmp.set_defaults(**fairseq_defaults)
        # fairseq expects the dict/data path as the first positional argument.
        argv = ([args.dict_path] + argv)
        fairseq_args = options.parse_args_and_arch(tmp, input_args=argv)
        # Copy every fairseq option onto our own args namespace.
        for (k, v) in vars(fairseq_args).items():
            setattr(args, k, v)
        return args

    def _default_values(self) -> Dict:
        # Megatron models are huge: save-memory mode, batch size 1, 16 partitions.
        partitioning_defaults = dict(save_memory_mode=True, partitioning_batch_size=1, analysis_batch_size=1, n_partitions=16, basic_blocks=['ModelParallelMultiheadAttention'])
        return partitioning_defaults
|
class MegatronPartitioner(PartitioningTask):
    """Partitioning task for Megatron language models built through fairseq."""

    def __init__(self, args):
        super().__init__(args)
        if (not has_fairseq):
            raise ImportError('\n\nPlease install fairseq_for_pipeline:')
        # Initialize single-process fairseq "distributed" state, then the LM task.
        distributed_utils.infer_init_method(args, force_distributed=True)
        args.device_id = 0
        args.distributed_rank = distributed_utils.distributed_init(args)
        self.task = tasks.setup_task(args)

    def register_functions(self):
        # Keep identity checks untraced; trace math.log and math.sqrt.
        register_new_explicit_untraced_function(operator.is_, operator)
        register_new_explicit_untraced_function(operator.is_not, operator)
        register_new_traced_function(log, math)
        register_new_traced_function(sqrt, math)

    @property
    def batch_dim(self) -> int:
        # Inputs are batched along dim 0.
        return 0

    def get_model(self, args):
        """Build the fairseq model and report its parameter+buffer size in GB."""
        model = self.task.build_model(args)
        model_size = sum(((t.nelement() * t.element_size()) for t in chain(model.parameters(), model.buffers())))
        model_size /= 1000000000.0
        print(f'{args.arch} model size {model_size:.2f}GB')
        return model

    def get_input(self, args, analysis=False):
        """Random token batch; +3 keeps ids clear of special/reserved tokens — TODO confirm."""
        seq_len = args.tokens_per_sample
        batch_size = (args.analysis_batch_size if analysis else args.partitioning_batch_size)
        return {'src_tokens': (torch.randint(1000, (batch_size, seq_len), dtype=torch.int64) + 3)}
|
class PartitioningTask(ABC):
    """Interface a partitioning task implements: supplies the model, sample
    inputs, and optional hooks around the partitioning/analysis pipeline."""

    def __init__(self, args) -> None:
        pass

    @property
    @abstractmethod
    def batch_dim(self) -> int:
        """Index of the batch dimension in this task's inputs."""
        pass

    @abstractmethod
    def get_model(self, args) -> torch.nn.Module:
        """Build and return the model to partition."""
        pass

    @abstractmethod
    def get_input(self, args, analysis=False):
        """Return one sample input (partitioning or analysis batch size)."""
        pass

    def register_functions(self):
        """Register explicit traced/untraced functions.

        For example, if we wish to trace math.log and not trace operator.is_,
        then it should be done here.
        """

    def update_analysis_kwargs(self, args, config, analysis_kwargs: Dict) -> Dict:
        """Enable modifications of the analysis_kwargs which are passed to run_analysis,
        for example set stages_on_same_gpu for gpt2 stateless.
        """
        return analysis_kwargs

    def post_partitioning(self, args, graph, analysis_result, summary):
        """Hook which is called after the partitioning process is done."""
|
class GetConfigFrom(Enum):
    """Source of the pipeline configuration."""
    HardCoded = auto()
    ParsedArgs = auto()
    Generated = auto()
|
def resize_token_embeddings(model, tokenizer):
    """Resize the model's token-embedding matrix to the tokenizer's vocab size
    (unwrapping a DataParallel-style `.module` wrapper first)."""
    target = model.module if hasattr(model, 'module') else model
    target.resize_token_embeddings(len(tokenizer))
|
def pretrained_model_config_and_tokenizer(config_class, model_class, tokenizer_class, model_name_or_path: str, config_name: str='', tokenizer_name: str='', do_lower_case: bool=False, cache_dir: str='', stateless_tied=False, do_resize_token_embedding=True, explicitly_set_dict=None, **config_kw):
    """Load (model, tokenizer, config) from a pretrained checkpoint.

    `explicitly_set_dict` entries are set on the config after loading; when
    `do_resize_token_embedding` the embedding matrix is resized to the
    tokenizer's vocab; `stateless_tied` invokes the model's make_stateless hook.

    Returns:
        (model, tokenizer, config)
    Raises:
        ValueError: stateless_tied requested but the model has no make_stateless* hook.
    """
    # BUGFIX: the default used to be a shared mutable dict ({}).
    if explicitly_set_dict is None:
        explicitly_set_dict = {}
    config = config_class.from_pretrained((config_name if config_name else model_name_or_path), cache_dir=(cache_dir if cache_dir else None), **config_kw)
    for (k, v) in explicitly_set_dict.items():
        setattr(config, k, v)
    tokenizer = tokenizer_class.from_pretrained((tokenizer_name if tokenizer_name else model_name_or_path), do_lower_case=do_lower_case, cache_dir=(cache_dir if cache_dir else None))
    # BUGFIX: the version check was a lexicographic string comparison
    # ('10.0.0' > '4.1.1' was False); compare numeric tuples instead.
    version_tuple = tuple(int(p) for p in transformers.__version__.split('.')[:3] if p.isdigit())
    if version_tuple > (4, 1, 1):
        model = model_class.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(cache_dir if cache_dir else None))
    else:
        # Older transformers need use_cdn; t5-11b is too large for the CDN.
        use_cdn = (model_name_or_path not in {'t5-11b'})
        model = model_class.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(cache_dir if cache_dir else None), use_cdn=use_cdn)
    if do_resize_token_embedding:
        resize_token_embeddings(model, tokenizer)
    if stateless_tied:
        model_to_resize = (model.module if hasattr(model, 'module') else model)
        if hasattr(model_to_resize, 'make_stateless_after_loaded_tied_and_resized'):
            model_to_resize.make_stateless_after_loaded_tied_and_resized()
        elif hasattr(model_to_resize, 'make_stateless'):
            model_to_resize.make_stateless()
        else:
            raise ValueError(f'Problem making model stateless. model_name_or_path: {model_name_or_path}')
    return (model, tokenizer, config)
|
def _register_model(dict_params, model_cls):
    """Record model configs in the module-level registries, all mapping to `model_cls`."""
    global MODEL_CFG_TO_SAMPLE_MODEL
    global MODEL_CONFIGS
    MODEL_CONFIGS.update(dict_params)
    for cfg_name in dict_params:
        MODEL_CFG_TO_SAMPLE_MODEL[cfg_name] = model_cls
|
class ParsePartitioningOptsVision(Parser):
    """Command-line parser for partitioning vision models."""

    def _add_model_args(self, group):
        group.add_argument('--model', choices=MODEL_CONFIGS.keys(), default='wrn_16x4')

    def _add_data_args(self, group):
        group.add_argument('--crop', type=int, default=32, help='crop size to use. (e.g: 32 for cifar, 224 for imagenet, 384 for some ViTs')

    def _default_values(self):
        return {'partitioning_batch_size': 128, 'n_iter': 100, 'n_partitions': 4, 'bw': 12, 'analysis_batch_size': 32}

    def _auto_file_name(self, args) -> str:
        """Derive the output file name from model, crop, #partitions, bandwidth and method."""
        bw_tag = str(args.bw).replace('.', '_')
        model_tag = str(args.model).replace('-', '_') + f'c{args.crop}'
        name = f'{args.output_file}{model_tag}_{args.n_partitions}p_bw{bw_tag}'
        if args.async_pipeline:
            name += '_async'
        method = args.partitioning_method.lower()
        # '2dbin' is reported as 'virtual_stages' in file names.
        name += '_' + (method if method != '2dbin' else 'virtual_stages')
        return name
|
class VisionPartioner(PartitioningTask):
    """Partitioning task for vision models: builds the sample model and random image batches."""

    def get_model(self, args) -> torch.nn.Module:
        # Look up the sample-model class and its kwargs by preset name.
        model_cls = MODEL_CFG_TO_SAMPLE_MODEL[args.model]
        model_kwargs = MODEL_CONFIGS[args.model]
        return model_cls(**model_kwargs).train()

    @property
    def batch_dim(self) -> int:
        # The batch is the first tensor dimension.
        return 0

    def get_input(self, args, analysis=False):
        """Return a random (B, 3, crop, crop) image batch; B depends on the phase."""
        batch_size = args.analysis_batch_size if analysis else args.partitioning_batch_size
        return torch.randn(batch_size, 3, args.crop, args.crop)
|
def tmpt5_base_tied_lmheads_512_4_4p_bw12_squad1_mpipe():
    """Preset kwargs for loading a stateless-tied t5-base (squad1, mpipe setup)."""
    # Config attributes forced after loading the pretrained config.
    forced_config_overrides = {
        'return_dict': False,
        'use_cache': False,
        'output_only': True,
        'output_attentions': False,
        'precomputed_masks': False,
        'output_hidden_states': False,
    }
    return {
        'model_type': 'new_t5_stateless',
        'model_name_or_path': 't5-base',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': forced_config_overrides,
        'stateless_tied': True,
    }
|
class NewT5HFLoader(HFLoader):
    """HF loader that maps stateless-tied checkpoint keys back to upstream T5 names."""

    def __init__(self, hf_transformers_model_class=T5ForConditionalGeneration):
        super().__init__(hf_transformers_model_class=hf_transformers_model_class)

    def substitue_state_dict_keys_back_to_original(self, training_state_dict):
        """Expand the single tied weight back to the three upstream key slots."""
        # Shallow copy so the caller's state dict is left untouched.
        restored = dict(training_state_dict)
        if 'shared_embed_weight' in restored:
            tied = restored.pop('shared_embed_weight')
            restored['shared.weight'] = tied
            restored['encoder.embed_tokens.weight'] = tied
            restored['decoder.embed_tokens.weight'] = tied
        return restored
|
class T5Stack(T5PreTrainedModel):
    """A stack of T5 blocks (encoder, or decoder when ``config.is_decoder``).

    Differs from upstream transformers in that the token-embedding weight is
    passed into ``forward`` explicitly (``shared_embedding``), so the embedding
    can be shared statelessly between encoder, decoder and the surrounding model.
    """
    def __init__(self, config, embed_tokens=None):
        super().__init__(config)
        # Typically a StatelessEmbedding whose weight has been popped; forward()
        # supplies the weight explicitly at call time.
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder
        # When True, callers pass masks that are already extended/inverted and
        # the mask-building logic in forward() is bypassed.
        self.precomputed_masks = config.precomputed_masks
        # Blocks are registered under string names '0'..'num_layers-1'; only
        # block 0 owns the relative attention bias (upstream T5 convention).
        for i in range(config.num_layers):
            self.add_module(str(i), T5Block(config, has_relative_attention_bias=bool((i == 0))))
        self.num_layers = config.num_layers
        self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.init_weights()
    def get_input_embeddings(self):
        return self.embed_tokens
    def get_output_embeddings(self):
        # Input and output embeddings are the same (tied) module here.
        return self.embed_tokens
    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings
    def forward(self, input_ids, shared_embedding, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        """Run the stack and return the final hidden states as a single tensor.

        ``shared_embedding`` is the shared embedding weight, passed through to
        the (stateless) ``embed_tokens`` module.
        """
        input_shape = input_ids.size()
        # Collapse any leading dims into the batch dimension.
        input_ids = input_ids.view((- 1), input_shape[(- 1)])
        assert is_not_None(self.embed_tokens), 'You have to intialize the model with valid token embeddings'
        inputs_embeds = self.embed_tokens(shared_embedding, input_ids)
        (batch_size, seq_length) = input_shape
        if (not self.precomputed_masks):
            # Build default all-ones masks where missing, then convert them to
            # the additive extended/inverted forms the attention layers expect.
            if is_None(attention_mask):
                attention_mask = torch.ones(batch_size, seq_length).to(inputs_embeds.device)
            if (self.is_decoder and is_None(encoder_attention_mask) and is_not_None(encoder_hidden_states)):
                encoder_seq_length = encoder_hidden_states.shape[1]
                encoder_attention_mask = torch.ones(batch_size, encoder_seq_length).to(inputs_embeds.device)
            extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, attention_mask.device)
            if (self.is_decoder and is_not_None(encoder_attention_mask)):
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = None
        else:
            # Masks arrive pre-extended/inverted from the caller.
            extended_attention_mask = attention_mask
            encoder_extended_attention_mask = encoder_attention_mask
        # Position biases are produced by block 0 and reused by later blocks.
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for i in range(self.num_layers):
            layer_module = getattr(self, str(i))
            layer_outputs = layer_module(hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias)
            if (i == 0):
                # First block returns (hidden_states, position_bias[, enc-dec bias]).
                hidden_states = layer_outputs[0]
                position_bias = layer_outputs[1]
                if (self.is_decoder and is_not_None(encoder_hidden_states)):
                    encoder_decoder_position_bias = layer_outputs[2]
            else:
                # NOTE(review): later blocks are assumed to return just the
                # hidden-states tensor (custom T5Block) — confirm.
                hidden_states = layer_outputs
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
|
@add_start_docstrings('The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top.', T5_START_DOCSTRING)
class T5Model(T5PreTrainedModel):
    """Bare T5 encoder-decoder (no head) adapted for stateless tied embeddings.

    NOTE(review): ``forward`` reads ``self.shared_embed_weight``, which only
    exists after ``make_stateless()`` has been called — the model appears to be
    intended for use only after that conversion; confirm against callers.
    """
    def __init__(self, config):
        super().__init__(config)
        # Token embedding shared between encoder and decoder.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Each stack gets its own config copy so the decoder flag doesn't leak.
        encoder_config = copy.deepcopy(config)
        self.encoder = T5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        self.decoder = T5Stack(decoder_config, self.shared)
        # If True, forward() returns only the decoder hidden states.
        self.output_only = config.output_only
        self.precomputed_masks = config.precomputed_masks
        self.init_weights()
    def make_stateless(self):
        """Replace the shared nn.Embedding with StatelessEmbeddings, keeping the
        single tied weight in ``self.shared_embed_weight``."""
        stateless_shared = StatelessEmbedding(self.shared)
        self.encoder.embed_tokens = StatelessEmbedding(self.shared)
        self.decoder.embed_tokens = StatelessEmbedding(self.shared)
        del self.shared
        # Drop the per-stack weight references; only one copy survives below.
        self.encoder.embed_tokens.pop_weight()
        self.decoder.embed_tokens.pop_weight()
        self.shared_embed_weight = stateless_shared.pop_weight()
    def get_input_embeddings(self):
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}.
        See base class PreTrainedModel.
        """
        # NOTE(review): this T5Stack registers blocks as numbered submodules and
        # has no `.layer` attribute — this looks like leftover BERT-style code
        # that would fail if called; confirm.
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, inverted_encoder_attention_mask=None):
        """Encode then decode.

        Returns the decoder hidden states only when ``self.output_only`` is set,
        otherwise ``(decoder_hidden_states, encoder_hidden_states)``.

        When ``self.precomputed_masks`` is set, ``inverted_encoder_attention_mask``
        is used directly as the decoder's cross-attention mask; otherwise the raw
        ``attention_mask`` is passed and the decoder stack inverts it itself.
        """
        encoder_hidden_states = self.encoder(input_ids=input_ids, shared_embedding=self.shared_embed_weight, attention_mask=attention_mask)
        decoder_hidden_states = self.decoder(input_ids=decoder_input_ids, shared_embedding=self.shared_embed_weight, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=(inverted_encoder_attention_mask if self.precomputed_masks else attention_mask))
        if self.output_only:
            return decoder_hidden_states
        return (decoder_hidden_states, encoder_hidden_states)
|
@add_start_docstrings('T5 Model with a `language modeling` head on top. ', T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
    """T5 with an LM head, adapted for stateless tied embeddings.

    The encoder/decoder embeddings are tied to each other (and made stateless by
    ``make_stateless``); the LM head is a separate, untied nn.Linear here.

    NOTE(review): ``forward`` reads ``self.shared_embed_weight``, which only
    exists after ``make_stateless()`` has been called — confirm against callers.
    """
    def __init__(self, config):
        super().__init__(config)
        self.model_dim = config.d_model
        # Token embedding shared between encoder and decoder.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)
        # Each stack gets its own config copy so the decoder flag doesn't leak.
        encoder_config = copy.deepcopy(config)
        self.encoder = T5Stack(encoder_config, self.shared)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        self.decoder = T5Stack(decoder_config, self.shared)
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        # -100 is the conventional "ignore" label index.
        self.lm_loss = nn.CrossEntropyLoss(ignore_index=(- 100))
        # If True, forward() returns only its first output (loss or logits).
        self.output_only = config.output_only
        self.precomputed_masks = config.precomputed_masks
        self.init_weights()
    def make_stateless(self):
        """Replace the shared nn.Embedding with StatelessEmbeddings, keeping the
        single tied weight in ``self.shared_embed_weight``."""
        stateless_shared = StatelessEmbedding(self.shared)
        self.encoder.embed_tokens = StatelessEmbedding(self.shared)
        self.decoder.embed_tokens = StatelessEmbedding(self.shared)
        del self.shared
        # Drop the per-stack weight references; only one copy survives below.
        self.encoder.embed_tokens.pop_weight()
        self.decoder.embed_tokens.pop_weight()
        self.shared_embed_weight = stateless_shared.pop_weight()
    def get_input_embeddings(self):
        return self.shared
    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)
    def get_output_embeddings(self):
        return self.lm_head
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, inverted_encoder_attention_mask=None, lm_labels=None):
        """Encode, decode and project to vocabulary logits.

        lm_labels: optional target token ids; labels set to -100 are ignored by
        the loss. When given without ``decoder_input_ids``, the decoder inputs
        are derived by right-shifting the labels (teacher forcing).

        Returns ``decoder_outputs[0]`` (the loss when lm_labels is given,
        otherwise the logits) if ``self.output_only``; otherwise the tuple
        ``([loss,] lm_logits, decoder_hidden_states, encoder_hidden_states)``.
        """
        encoder_hidden_states = self.encoder(input_ids=input_ids, shared_embedding=self.shared_embed_weight, attention_mask=attention_mask)
        if (is_not_None(lm_labels) and is_None(decoder_input_ids)):
            # Teacher forcing: shift labels right to form decoder inputs.
            decoder_input_ids = self._shift_right(lm_labels)
        decoder_hidden_states = self.decoder(input_ids=decoder_input_ids, shared_embedding=self.shared_embed_weight, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=(inverted_encoder_attention_mask if self.precomputed_masks else attention_mask))
        # Rescale before the LM head, as in upstream T5 (d_model ** -0.5).
        sequence_output = (decoder_hidden_states * (self.model_dim ** (- 0.5)))
        lm_logits = self.lm_head(sequence_output)
        decoder_outputs = (lm_logits, decoder_hidden_states)
        if is_not_None(lm_labels):
            loss_fct = self.lm_loss
            loss = loss_fct(lm_logits.view((- 1), lm_logits.size((- 1))), lm_labels.view((- 1)))
            decoder_outputs = ((loss,) + decoder_outputs)
        if self.output_only:
            return decoder_outputs[0]
        return (decoder_outputs + (encoder_hidden_states,))
    def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, **kwargs):
        # NOTE(review): the returned keys ('decoder_past_key_value_states',
        # 'encoder_outputs', 'use_cache') are not parameters of this class's
        # forward(); this hook appears to target the upstream generate() loop —
        # confirm before relying on generation.
        assert is_not_None(past), 'past has to be defined for encoder_outputs'
        if (len(past) < 2):
            (encoder_outputs, decoder_past_key_value_states) = (past, None)
        else:
            (encoder_outputs, decoder_past_key_value_states) = (past[0], past[1])
        return {'decoder_input_ids': input_ids, 'decoder_past_key_value_states': decoder_past_key_value_states, 'encoder_outputs': encoder_outputs, 'attention_mask': attention_mask, 'use_cache': use_cache}
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached decoder states to follow the selected beams."""
        if (len(past) < 2):
            # No decoder cache present — nothing to reorder.
            logger.warning('You might want to consider setting `use_cache=True` to speed up decoding')
            return past
        decoder_past = past[1]
        past = (past[0],)
        reordered_decoder_past = ()
        for layer_past_states in decoder_past:
            reordered_layer_past_states = ()
            for layer_past_state in layer_past_states:
                # Select the batch entries (dim 0) corresponding to the beams.
                reordered_layer_past_states = (reordered_layer_past_states + (layer_past_state.index_select(0, beam_idx),))
            assert (reordered_layer_past_states[0].shape == layer_past_states[0].shape)
            assert (len(reordered_layer_past_states) == len(layer_past_states))
            reordered_decoder_past = (reordered_decoder_past + (reordered_layer_past_states,))
        return (past + (reordered_decoder_past,))
|
def copy_attrs(me, other, attr_names: List[str]):
    """Copy each named attribute from ``other`` onto ``me``."""
    for attr in attr_names:
        value = getattr(other, attr)
        setattr(me, attr, value)
|
class StatelessEmbedding(nn.Module):
    """Embedding whose weight is supplied at call time instead of stored.

    Built from an existing ``nn.Embedding``; after ``pop_weight()`` the module
    holds no weight parameter and ``forward`` must receive it explicitly.
    """
    __constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'sparse']

    def __init__(self, other: nn.Embedding):
        super().__init__()
        # Mirror the source embedding's configuration and (initially) its weight.
        for attr in ('num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm', 'norm_type', 'scale_grad_by_freq', 'weight', 'sparse'):
            setattr(self, attr, getattr(other, attr))

    def forward(self, weight, input):
        # Same lookup as nn.Embedding, but with an externally supplied weight.
        return F.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)

    def pop_weight(self):
        """Remove and return the stored weight, leaving the module stateless."""
        weight = self.weight
        del self.weight
        return weight

    def extra_repr(self):
        parts = ['{num_embeddings}, {embedding_dim}']
        if self.padding_idx is not None:
            parts.append('padding_idx={padding_idx}')
        if self.max_norm is not None:
            parts.append('max_norm={max_norm}')
        if self.norm_type != 2:
            parts.append('norm_type={norm_type}')
        if self.scale_grad_by_freq is not False:
            parts.append('scale_grad_by_freq={scale_grad_by_freq}')
        if self.sparse is not False:
            parts.append('sparse=True')
        return ', '.join(parts).format(**self.__dict__)
|
class StatelessLinear(nn.Module):
    """Stateless Linear layer with shared weight.

    The weight is supplied at call time; the bias (if any) stays module-owned
    and is not shared.
    """
    __constants__ = ['bias', 'in_features', 'out_features']

    def __init__(self, other: nn.Linear):
        super().__init__()
        # Mirror the source layer's dimensions and share its parameters.
        for attr in ('in_features', 'out_features', 'weight', 'bias'):
            setattr(self, attr, getattr(other, attr))

    def forward(self, weight, input):
        # requires_grad_() re-enables grad tracking on the shared weight each call.
        return F.linear(input, weight.requires_grad_(), self.bias)

    def extra_repr(self):
        has_bias = self.bias is not None
        return f'in_features={self.in_features}, out_features={self.out_features}, bias={has_bias}'

    def pop_weight(self):
        """Remove and return the stored weight, leaving only the bias (if any)."""
        weight = self.weight
        del self.weight
        return weight
|
class StatelessSequential(nn.Sequential):
    """Sequential model where first and last layers are tied.

    The two end layers are replaced by their stateless counterparts; the single
    tied weight is kept in ``self.tied_weight`` until ``pop_weight()`` removes
    it, after which ``forward`` must receive it explicitly.
    NOTE: it can be generalized to a model where more layers are tied.
    """

    def __init__(self, *args):
        if (len(args) == 1) and isinstance(args[0], OrderedDict):
            raise NotImplementedError('not supprting ordered dicts for now')
        first = args[0]
        last = args[len(args) - 1]
        supported = [nn.Embedding, nn.Linear]

        def check_supported(layer, name):
            # Only layer types with a stateless counterpart may be tied.
            if not any([isinstance(layer, k) for k in supported]):
                raise NotImplementedError(f'{name} layer should be one of {supported}, got {layer.__class__}')

        check_supported(first, 'first')
        check_supported(last, 'last')
        # Tie the two ends: both share the first layer's weight storage.
        shared_data = first.weight.data
        last.weight.data = shared_data
        # Swap in the stateless wrappers and strip their weight references.
        first_stateless = CLASS_TO_STATELESS_CLASS[first.__class__](first)
        last_stateless = CLASS_TO_STATELESS_CLASS[last.__class__](last)
        first_stateless.pop_weight()
        last_stateless.pop_weight()
        super().__init__(first_stateless, *args[1:(- 1)], last_stateless)
        self.tied_weight = shared_data

    def pop_weight(self):
        """Remove and return the tied weight."""
        weight = self.tied_weight
        del self.tied_weight
        return weight

    def forward(self, tied_weight, input):
        last_index = len(self) - 1
        for (position, layer) in enumerate(self):
            if (position == 0) or (position == last_index):
                # The tied ends receive the shared weight explicitly.
                input = layer(tied_weight, input)
            else:
                input = layer(input)
        return input
|
class CompositionStatelessSequential(nn.Module):
    """Wraps a StatelessSequential and owns its tied weight as a Parameter."""

    def __init__(self, *args):
        super().__init__()
        inner = StatelessSequential(*args)
        # Re-own the popped tied weight so this module exposes it as a parameter.
        self.tied_w = nn.Parameter(inner.pop_weight())
        self.stateless_seq = inner

    def forward(self, *args, **kw):
        return self.stateless_seq.forward(self.tied_w, *args, **kw)
|
class PreTrainedModel(TransformersPretrainedModel):
KEY_TRANSLATION = None
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
"Instantiate a pretrained pytorch model from a pre-trained model configuration.\n\n The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with ``model.train()``\n\n The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.\n It is up to you to train those weights with a downstream fine-tuning task.\n\n The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path: either:\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n - None if you are both providing the configuration and state dictionary (resp. 
with keyword arguments ``config`` and ``state_dict``)\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) one of:\n - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or\n - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`\n Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. 
Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n # For example purposes. Not runnable.\n model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')\n model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n "
config = kwargs.pop('config', None)
state_dict = kwargs.pop('state_dict', None)
cache_dir = kwargs.pop('cache_dir', None)
from_tf = kwargs.pop('from_tf', False)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', False)
proxies = kwargs.pop('proxies', None)
output_loading_info = kwargs.pop('output_loading_info', False)
local_files_only = kwargs.pop('local_files_only', False)
use_cdn = kwargs.pop('use_cdn', True)
if (not isinstance(config, PretrainedConfig)):
config_path = (config if (config is not None) else pretrained_model_name_or_path)
(config, model_kwargs) = cls.config_class.from_pretrained(config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs)
else:
model_kwargs = kwargs
if (pretrained_model_name_or_path is not None):
if os.path.isdir(pretrained_model_name_or_path):
if (from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, (TF_WEIGHTS_NAME + '.index')))):
archive_file = os.path.join(pretrained_model_name_or_path, (TF_WEIGHTS_NAME + '.index'))
elif (from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME))):
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError('Error no file named {} found in directory {} or `from_tf` set to False'.format([WEIGHTS_NAME, TF2_WEIGHTS_NAME, (TF_WEIGHTS_NAME + '.index')], pretrained_model_name_or_path))
elif (os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path)):
archive_file = pretrained_model_name_or_path
elif os.path.isfile((pretrained_model_name_or_path + '.index')):
assert from_tf, 'We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint'.format((pretrained_model_name_or_path + '.index'))
archive_file = (pretrained_model_name_or_path + '.index')
elif (transformers.__version__ < '3.5.0'):
archive_file = hf_bucket_url(pretrained_model_name_or_path, filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME), use_cdn=use_cdn)
else:
archive_file = hf_bucket_url(pretrained_model_name_or_path, filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME))
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only)
if (resolved_archive_file is None):
raise EnvironmentError
except EnvironmentError:
msg = f'''Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:
- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'
- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.
'''
raise EnvironmentError(msg)
if (resolved_archive_file == archive_file):
logger.info('loading weights file {}'.format(archive_file))
else:
logger.info('loading weights file {} from cache at {}'.format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
model = cls(config, *model_args, **model_kwargs)
if ((state_dict is None) and (not from_tf)):
try:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
except Exception:
raise OSError('Unable to load weights from pytorch checkpoint file. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. ')
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith('.index'):
model = cls.load_tf_weights(model, config, resolved_archive_file[:(- 6)])
else:
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.')
raise
else:
assert (cls.KEY_TRANSLATION is not None)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = key
for (k, v) in cls.KEY_TRANSLATION.items():
if (k in new_key):
new_key = new_key.replace(k, v)
if ('gamma' in new_key):
new_key = new_key.replace('gamma', 'weight')
if ('beta' in new_key):
new_key = new_key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for (old_key, new_key) in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if (metadata is not None):
state_dict._metadata = metadata
def load(module: torch.nn.Module, prefix=''):
local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in module._modules.items():
if (child is not None):
load(child, ((prefix + name) + '.'))
start_prefix = ''
model_to_load = model
has_prefix_module = any((s.startswith(cls.base_model_prefix) for s in state_dict.keys()))
if ((not hasattr(model, cls.base_model_prefix)) and has_prefix_module):
start_prefix = (cls.base_model_prefix + '.')
if (hasattr(model, cls.base_model_prefix) and (not has_prefix_module)):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if (model.__class__.__name__ != model_to_load.__class__.__name__):
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [key.split((cls.base_model_prefix + '.'))[(- 1)] for key in model.state_dict().keys()]
missing_keys.extend((head_model_state_dict_without_base_prefix - base_model_state_dict))
if (len(missing_keys) > 0):
logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
if (len(unexpected_keys) > 0):
logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
if (len(error_msgs) > 0):
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(model.__class__.__name__, '\n\t'.join(error_msgs)))
model.tie_weights()
model.eval()
print({'missing_keys': missing_keys[:10], 'error_msgs': error_msgs[:10]})
if output_loading_info:
loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'error_msgs': error_msgs}
return (model, loading_info)
if (hasattr(config, 'xla_device') and config.xla_device):
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model.to(xm.xla_device())
return model
def forward(self, *args, **kwargs):
    """Abstract forward pass; concrete subclasses must override this."""
    raise NotImplementedError
|
class Dataset(torch.utils.data.Dataset):
    """Synthetic planted-clique dataset over undirected graphs on ``n`` nodes.

    Each sample is the flattened lower-triangular edge-indicator vector of a
    random graph, with entries in {-0.5, +0.5}.  Samples at even indices get
    a clique of ``k`` randomly chosen nodes planted (all their pairwise edges
    forced to +0.5, label 1.0); odd indices stay fully random (label 0.0).

    Parameters
    ----------
    n : int
        Number of graph nodes; feature vectors have ``n*(n-1)//2`` entries.
    k : int
        Size of the planted clique.
    max_samples_num : int
        Reported dataset length.
    just : str, optional
        ``'x'`` -> return only the features, ``'y'`` -> return only the
        label, ``None`` (default) -> return the ``(X, y)`` pair.

    Fix: the original ``just == 'x'`` branch contained a duplicated,
    unreachable ``return X`` statement; the sampling logic was also
    duplicated between branches and is now factored into a helper.
    """

    def __init__(self, n, k, max_samples_num, just=None):
        self.just = just
        self.samples_num = int(max_samples_num)
        self.n = n
        self.node_list = list(range(n))
        self.k = k
        # Map each unordered node pair to its index in the flattened
        # lower-triangular edge vector (stored symmetrically for both
        # (i, j) and (j, i) lookups).
        self.edge_dict = {}
        A, B = np.tril_indices(n, k=-1)
        for ctr, (i, j) in enumerate(zip(A, B)):
            self.edge_dict[(i, j)] = ctr
            self.edge_dict[(j, i)] = ctr

    def __len__(self):
        return self.samples_num

    def _random_features(self, plant_clique):
        """Draw a random +-0.5 edge vector, optionally planting a k-clique."""
        X = torch.randint(0, 2, ((self.n * (self.n - 1)) // 2,)) - 0.5
        if plant_clique:
            ch_nodes = np.random.choice(self.node_list, self.k, replace=False)
            X[[self.edge_dict[c] for c in combinations(ch_nodes, 2)]] = 0.5
        return X

    def __getitem__(self, index):
        positive = (index % 2) == 0  # even indices carry the planted clique
        if self.just == 'x':
            return self._random_features(positive)
        if self.just == 'y':
            return torch.tensor([1.0]) if positive else torch.tensor([0.0])
        X = self._random_features(positive)
        y = torch.tensor([1.0]) if positive else torch.tensor([0.0])
        return (X, y)
|
class Net(nn.Module):
    """MLP over edge-indicator vectors whose first layer is a ``SplitLinear``.

    The first hidden width is rounded down so it divides evenly by
    ``n_split``; a warning is emitted whenever rounding was necessary.
    """

    def __init__(self, n, c, n_split=4):
        super(Net, self).__init__()
        hidden = 2 + (3 * n * (n - 1)) // 4
        remainder = hidden % n_split
        if remainder != 0:
            warnings.warn('changed dim_1')
            hidden -= remainder
        in_dim = (n * (n - 1)) // 2
        self.input_layer = SplitLinear(nn.Linear(in_dim, hidden), n_split=n_split)
        self.bn1 = nn.BatchNorm1d(hidden)
        self.h1_layer = nn.Linear(hidden, c)
        self.bn2 = nn.BatchNorm1d(c)
        self.h2_layer = nn.Linear(c, c // 20)
        self.bn3 = nn.BatchNorm1d(c // 20)
        self.output_layer = nn.Linear(c // 20, 1)

    def forward(self, x):
        # Three (linear -> batch-norm -> leaky ReLU) stages, then a read-out.
        for layer, norm in ((self.input_layer, self.bn1),
                            (self.h1_layer, self.bn2),
                            (self.h2_layer, self.bn3)):
            x = F.leaky_relu(norm(layer(x)))
        return self.output_layer(x)
|
class NetWithoutSplit(nn.Module):
def __init__(self, n, c):
super(NetWithoutSplit, self).__init__()
self.input_layer = nn.Linear(((n * (n - 1)) // 2), (((3 * n) * (n - 1)) // 4))
self.bn1 = nn.BatchNorm1d((((3 * n) * (n - 1)) // 4))
self.h1_layer = nn.Linear((((3 * n) * (n - 1)) // 4), c)
self.bn2 = nn.BatchNorm1d(c)
self.h2_layer = nn.Linear(c, (c // 20))
self.bn3 = nn.BatchNorm1d((c // 20))
self.output_layer = nn.Linear((c // 20), 1)
def forward(self, x):
x = F.leaky_relu(self.bn1(self.input_layer(x)))
x = F.leaky_relu(self.bn2(self.h1_layer(x)))
x = F.leaky_relu(self.bn3(self.h2_layer(x)))
x = self.output_layer(x)
return x
|
class Dummy(nn.Module):
    """Toy four-Linear model used to exercise partitioning/tracing code.

    NOTE: ``forward`` unpacks ``l3``'s output tensor into two pieces along
    dim 0, so it only works when the leading (batch) dimension equals 2.
    """

    def __init__(self):
        super(Dummy, self).__init__()
        # Register l0..l3 in the same order as explicit attribute assignment
        # (identical parameter names and RNG draws).
        for idx in range(4):
            self.add_module('l%d' % idx, nn.Linear(100, 100))

    def forward(self, x):
        direct = self.l0(x)
        chained = self.l2(self.l1(x))
        first, second = self.l3(chained)  # tensor unpack along dim 0
        return (second, first, direct)
|
class Stage0(nn.Module):
    """Pipeline stage wrapping the partitioned model's ``l0`` Linear layer."""

    def __init__(self, layers, tensors):
        super(Stage0, self).__init__()
        key = 'Dummy/Linear[l0]'
        assert key in layers
        self.l = layers[key]
        assert isinstance(self.l, nn.Linear)

    def forward(self, x):
        # Wrap in a 1-tuple so every stage has a uniform output shape.
        return (self.l(x),)
|
class Stage1(nn.Module):
    """Pipeline stage wrapping the partitioned model's ``l1`` Linear layer."""

    def __init__(self, layers, tensors):
        super(Stage1, self).__init__()
        key = 'Dummy/Linear[l1]'
        assert key in layers
        self.l = layers[key]
        assert isinstance(self.l, nn.Linear)

    def forward(self, x):
        # Wrap in a 1-tuple so every stage has a uniform output shape.
        return (self.l(x),)
|
class Stage2(nn.Module):
    """Pipeline stage wrapping the partitioned model's ``l2`` Linear layer."""

    def __init__(self, layers, tensors):
        super(Stage2, self).__init__()
        key = 'Dummy/Linear[l2]'
        assert key in layers
        self.l = layers[key]
        assert isinstance(self.l, nn.Linear)

    def forward(self, x):
        # Wrap in a 1-tuple so every stage has a uniform output shape.
        return (self.l(x),)
|
class Stage3(nn.Module):
    """Final pipeline stage wrapping ``l3``; emits the layer output and the
    same output shifted by one (two tensors, mirroring Dummy's two heads)."""

    def __init__(self, layers, tensors):
        super(Stage3, self).__init__()
        key = 'Dummy/Linear[l3]'
        assert key in layers
        self.l = layers[key]
        assert isinstance(self.l, nn.Linear)

    def forward(self, x):
        y = self.l(x)
        return (y, y + 1)
|
class SplitLinear(nn.Module):
    """Linear layer whose weight is partitioned along ``out_features``.

    Each of the ``n_split`` sub-layers produces one slice of the output;
    ``forward`` concatenates the slices back together, so the result equals
    the wrapped ``nn.Linear``'s output.  ``out_features`` must be divisible
    by ``n_split``.
    """

    __constants__ = ['in_features', 'out_features']

    def __init__(self, other: nn.Linear, n_split: int):
        super().__init__()
        self.in_features = other.in_features
        self.out_features = other.out_features
        if self.out_features % n_split != 0:
            raise NotImplementedError()
        chunk = self.out_features // n_split  # rows per split
        self.weights = nn.ParameterList(
            [nn.Parameter(w.contiguous()) for w in other.weight.split(chunk, dim=0)])
        self.n_split = n_split
        if other.bias is None:
            self.biases = [None] * n_split
        else:
            self.biases = nn.ParameterList(
                [nn.Parameter(b.contiguous()) for b in other.bias.split(chunk, dim=0)])

    def forward(self, input):
        parts = [F.linear(input, w, b) for w, b in zip(self.weights, self.biases)]
        return torch.cat(parts, dim=-1)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.biases[0] is not None)
|
class SplitLinearIn(nn.Module):
    """Linear layer whose weight is partitioned along ``in_features``.

    Each sub-layer consumes one slice of the input; the partial products are
    summed and the single shared bias is added once at the end, so the
    result equals the wrapped ``nn.Linear``'s output.  ``in_features`` must
    be divisible by ``n_split``.

    Fix: the constructor referenced the bare name ``in_features`` instead of
    ``self.in_features``, raising ``NameError`` on every instantiation.
    """

    __constants__ = ['in_features', 'out_features']

    def __init__(self, other: nn.Linear, n_split: int):
        super().__init__()
        self.in_features = other.in_features
        self.out_features = other.out_features
        self.bias = other.bias
        if self.in_features % n_split != 0:
            raise NotImplementedError()
        t = self.in_features // n_split  # columns per split
        self.t = t
        self.weights = nn.ParameterList(
            [nn.Parameter(a.contiguous()) for a in other.weight.split(t, dim=1)])
        self.n_split = n_split

    def forward(self, input):
        si = input.split(self.t, dim=-1)
        # Sum the partial matmuls over the splits, then add the bias once.
        r = torch.stack([F.linear(i, w) for (w, i) in zip(self.weights, si)], 0).sum(0)
        if self.bias is not None:
            return r.add_(self.bias)
        return r

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
|
class NoReduceSplitLinear(SplitLinear):
    """``SplitLinear`` variant returning the per-split outputs as a list
    instead of concatenating them (the caller performs the reduction)."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def forward(self, input):
        outputs = []
        for w, b in zip(self.weights, self.biases):
            outputs.append(F.linear(input, w, b))
        return outputs
|
class NoReduceSplitLinearIn(SplitLinearIn):
    """``SplitLinearIn`` variant that takes already-split input and returns
    the raw partial products (no sum, no bias) for the caller to reduce."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def forward(self, split_input):
        outputs = []
        for w, i in zip(self.weights, split_input):
            outputs.append(F.linear(i, w))
        return outputs
|
class AlexNet(nn.Module):
    """AlexNet: convolutional feature extractor, adaptive average pooling to
    6x6, and a three-layer fully connected classifier head."""

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.avgpool(self.features(x))
        # Flatten to (batch, 256*6*6) before the classifier head.
        x = x.view(x.size(0), 256 * 6 * 6)
        return self.classifier(x)
|
def alexnet(pretrained=False, **kwargs):
    """AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['alexnet'])
    model.load_state_dict(state)
    return model
|
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
(self.add_module('norm1', nn.BatchNorm2d(num_input_features)),)
(self.add_module('relu1', nn.ReLU(inplace=True)),)
(self.add_module('conv1', nn.Conv2d(num_input_features, (bn_size * growth_rate), kernel_size=1, stride=1, bias=False)),)
(self.add_module('norm2', nn.BatchNorm2d((bn_size * growth_rate))),)
(self.add_module('relu2', nn.ReLU(inplace=True)),)
(self.add_module('conv2', nn.Conv2d((bn_size * growth_rate), growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),)
self.drop_rate = drop_rate
if (self.drop_rate > 0):
self.add_module('dropout', nn.Dropout(p=self.drop_rate))
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
return torch.cat([x, new_features], 1)
|
class _DenseBlock(nn.Sequential):
    """A stack of ``num_layers`` dense layers; layer ``i`` receives the
    original input channels plus ``i * growth_rate`` grown channels."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            in_ch = num_input_features + i * growth_rate
            self.add_module('denselayer%d' % (i + 1),
                            _DenseLayer(in_ch, growth_rate, bn_size, drop_rate))
|
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
|
class DenseNet(nn.Module):
    """Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNet, self).__init__()
        # Stem: 7x7 conv + BN + ReLU + 3x3 max-pool.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7,
                                stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Alternate dense blocks with channel-halving transitions.
        num_features = num_init_features
        last = len(block_config) - 1
        for i, num_layers in enumerate(block_config):
            self.features.add_module(
                'denseblock%d' % (i + 1),
                _DenseBlock(num_layers=num_layers,
                            num_input_features=num_features,
                            bn_size=bn_size, growth_rate=growth_rate,
                            drop_rate=drop_rate))
            num_features = num_features + num_layers * growth_rate
            if i != last:
                self.features.add_module(
                    'transition%d' % (i + 1),
                    _Transition(num_input_features=num_features,
                                num_output_features=num_features // 2))
                num_features = num_features // 2
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.relu = nn.ReLU()
        self.adaptive_avg_pool2d = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(num_features, num_classes)
        # Initialization: Kaiming for convs, unit-gamma/zero-beta for BN,
        # zero bias for the classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.relu(self.features(x))
        out = self.adaptive_avg_pool2d(out)
        out = out.view(out.size(0), -1)
        return self.classifier(out)
|
def densenet121(pretrained=False, **kwargs):
    """Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 24, 16), **kwargs)
    if pretrained:
        # Remap legacy checkpoint keys of the form 'norm.1.weight' to the
        # flattened 'norm1.weight' names this implementation uses.
        pattern = re.compile(
            '^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet121'])
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            if match is not None:
                state_dict[match.group(1) + match.group(2)] = state_dict.pop(key)
        model.load_state_dict(state_dict)
    return model
|
def densenet169(pretrained=False, **kwargs):
    """Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 32, 32), **kwargs)
    if pretrained:
        # Remap legacy checkpoint keys of the form 'norm.1.weight' to the
        # flattened 'norm1.weight' names this implementation uses.
        pattern = re.compile(
            '^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet169'])
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            if match is not None:
                state_dict[match.group(1) + match.group(2)] = state_dict.pop(key)
        model.load_state_dict(state_dict)
    return model
|
def densenet201(pretrained=False, **kwargs):
    """Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=64, growth_rate=32,
                     block_config=(6, 12, 48, 32), **kwargs)
    if pretrained:
        # Remap legacy checkpoint keys of the form 'norm.1.weight' to the
        # flattened 'norm1.weight' names this implementation uses.
        pattern = re.compile(
            '^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet201'])
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            if match is not None:
                state_dict[match.group(1) + match.group(2)] = state_dict.pop(key)
        model.load_state_dict(state_dict)
    return model
|
def densenet161(pretrained=False, **kwargs):
    """Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = DenseNet(num_init_features=96, growth_rate=48,
                     block_config=(6, 12, 36, 24), **kwargs)
    if pretrained:
        # Remap legacy checkpoint keys of the form 'norm.1.weight' to the
        # flattened 'norm1.weight' names this implementation uses.
        pattern = re.compile(
            '^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet161'])
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            if match is not None:
                state_dict[match.group(1) + match.group(2)] = state_dict.pop(key)
        model.load_state_dict(state_dict)
    return model
|
class Inception(nn.Module):
    """Inception block with four parallel branches (1x1; 1x1->3x3;
    1x1->3x3->3x3; max-pool->1x1) whose outputs are concatenated along the
    channel dimension.  Every conv is followed by BatchNorm and ReLU."""

    def __init__(self, in_planes, kernel_1_x, kernel_3_in, kernel_3_x,
                 kernel_5_in, kernel_5_x, pool_planes):
        super(Inception, self).__init__()

        def conv_bn_relu(cin, cout, **conv_kw):
            # Conv followed by batch-norm and in-place ReLU.
            return [nn.Conv2d(cin, cout, **conv_kw),
                    nn.BatchNorm2d(cout),
                    nn.ReLU(True)]

        self.b1 = nn.Sequential(*conv_bn_relu(in_planes, kernel_1_x, kernel_size=1))
        self.b2 = nn.Sequential(
            *conv_bn_relu(in_planes, kernel_3_in, kernel_size=1),
            *conv_bn_relu(kernel_3_in, kernel_3_x, kernel_size=3, padding=1))
        self.b3 = nn.Sequential(
            *conv_bn_relu(in_planes, kernel_5_in, kernel_size=1),
            *conv_bn_relu(kernel_5_in, kernel_5_x, kernel_size=3, padding=1),
            *conv_bn_relu(kernel_5_x, kernel_5_x, kernel_size=3, padding=1))
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            *conv_bn_relu(in_planes, pool_planes, kernel_size=1))

    def forward(self, x):
        branches = (self.b1, self.b2, self.b3, self.b4)
        return torch.cat([branch(x) for branch in branches], 1)
|
# NOTE(review): the three lines below are non-Python web-page residue
# (dataset-viewer boilerplate), commented out so the module parses;
# they are safe to delete.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.