code
stringlengths
17
6.64M
def wrn16x4_c100():
    """Bar-plot final (epoch-200) test accuracy for WRN-16x4 on CIFAR-100.

    Reads 4p_seq_ddpsim.csv (produced by all_results_to_df.py), keeps
    bs_train == 128 rows, and saves one bar per algorithm, split into
    columns by step_every.
    """
    graph = 'test_acc'
    dataset = 'cifar100'
    model = 'wrn_16x4_c100_p4'
    csv = '4p_seq_ddpsim.csv'
    out_file_name = os.path.join('.', f'{graph}_{model}.png')
    df = pd.read_csv(csv).query('dataset == @dataset and model == @model and bs_train == 128')
    ax = sns.catplot(x='epoch', y=graph, hue='alg', col='step_every',
                     kind='bar', data=df.query('epoch == 200'))
    ax.set(ylim=(54, 77))
    # catplot returns a FacetGrid (no get_figure); the else branch handles it.
    if hasattr(ax, 'get_figure'):
        ax.set_title(f'{graph}_{model}')
        fig = ax.get_figure()
        fig.savefig(out_file_name)
    else:
        ax.savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def wrn16x4_c100_gap():
    """Plot per-partition gap metrics (p0..p2) vs epoch for the wp_ga_ws
    algorithm on WRN-16x4 / CIFAR-100, one line per step_every value.

    Data comes from 4p_seq_ddpsim.csv (produced by all_results_to_df.py).
    """
    graph = 'p0_gap'
    dataset = 'cifar100'
    model = 'wrn_16x4_c100_p4'
    alg = 'wp_ga_ws'
    csv = '4p_seq_ddpsim.csv'
    out_file_name = os.path.join('.', f'{graph}_{model}.png')
    df = pd.read_csv(csv).query('dataset == @dataset and model == @model and bs_train == 128 and alg== @alg')
    grid = sns.PairGrid(y_vars=[f'p{i}_gap' for i in range(3)],
                        x_vars=['epoch'], hue='step_every', data=df)
    ax = grid.map(plt.plot).add_legend()
    # PairGrid has no get_figure; the else branch handles it.
    if hasattr(ax, 'get_figure'):
        ax.set_title(f'{graph}_{model}')
        ax.get_figure().savefig(out_file_name)
    else:
        ax.savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def for_meeting():
    """Bar-plot epoch-200 CIFAR-100 test accuracy: ddp at bs_train 32 vs all
    other algorithms at bs_train 128. Saves ./for_meeting.png."""
    csv = 'for_meeting.csv'
    out_file_name = os.path.join('.', 'for_meeting.png')
    df = (pd.read_csv(csv)
          .query("dataset == 'cifar100'")
          .query('epoch == 200')
          .query("bs_train == 32 and alg == 'ddp' or bs_train == 128 and alg != 'ddp'"))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # sanity: all remaining rows must belong to a single model
    models = pd.unique(df.model)
    assert (len(models) == 1)
    ax.set_ylim(80, 83)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def for_meeting2():
    """Same comparison as for_meeting(), but with ddp also at bs_train 128
    (the 'bigger ddp' variant). Saves ./for_meeting_bigger_ddp.png."""
    csv = 'for_meeting.csv'
    out_file_name = os.path.join('.', 'for_meeting_bigger_ddp.png')
    df = (pd.read_csv(csv)
          .query("dataset == 'cifar100'")
          .query('epoch == 200')
          .query("bs_train == 128 and alg == 'ddp' or bs_train == 128 and alg != 'ddp'"))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # sanity: all remaining rows must belong to a single model
    models = pd.unique(df.model)
    assert (len(models) == 1)
    ax.set_ylim(80, 83)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def arrowed_spines(fig, ax):
    """Replace the axes box with two arrows along the x and y axes.

    Hides all spines and ticks, then draws arrows from the lower-left corner.
    Head sizes for the y-axis arrow are corrected by the axes aspect ratio
    (measured in figure inches) so both heads look the same on screen.
    """
    (xmin, xmax) = ax.get_xlim()
    (ymin, ymax) = ax.get_ylim()
    # strip the spine box and every tick mark/label
    for side in ('bottom', 'right', 'top', 'left'):
        ax.spines[side].set_visible(False)
    plt.xticks([])
    plt.yticks([])
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    # axes extent in inches, for the aspect-ratio correction below
    dps = fig.dpi_scale_trans.inverted()
    bbox = ax.get_window_extent().transformed(dps)
    (width, height) = (bbox.width, bbox.height)
    hw = ((1.0 / 20.0) * (ymax - ymin))   # head width (data units)
    hl = ((1.0 / 20.0) * (xmax - xmin))   # head length (data units)
    lw = 1.0                              # arrow line width
    ohg = 0.3                             # arrow overhang
    # y-axis head sizes, compensated for the x/y scale difference
    yhw = ((((hw / (ymax - ymin)) * (xmax - xmin)) * height) / width)
    yhl = ((((hl / (xmax - xmin)) * (ymax - ymin)) * width) / height)
    arrow_kw = dict(fc='k', ec='k', lw=lw, overhang=ohg,
                    length_includes_head=True, clip_on=False)
    ax.arrow(xmin, ymin, (xmax - xmin), 0.0, head_width=hw, head_length=hl, **arrow_kw)
    ax.arrow(xmin, ymin, 0.0, (ymax - ymin), head_width=yhw, head_length=yhl, **arrow_kw)
def sanitize_memory(memory_mb):
    """Scale a memory reading in MB to drawing-width units.

    11019 is presumably the card's total memory in MB — TODO confirm.
    """
    fraction_used = memory_mb / 11019
    return fraction_used * GPU_MAX_WIDTH
def sanitize_utilizaton(util):
    """Scale a utilization percentage (0-100) to drawing-height units."""
    fraction = util / 100
    return fraction * GPU_HEIGHT
def add_gpu(ax, gpu_id, memory, utilization):
    """Draw one GPU at slot ``gpu_id``: an outline box for the whole GPU and a
    hatched fill whose width encodes memory use and height utilization."""
    fat = sanitize_memory(memory)            # fill width, drawing units
    frac = sanitize_utilizaton(utilization)  # fill height factor
    height = GPU_HEIGHT
    x = gpu_id * (GPU_MAX_WIDTH + SPACE)
    y = GPU_DOWN_BORDER
    outline = Rectangle((x, y), GPU_MAX_WIDTH, GPU_HEIGHT,
                        linewidth=BB_LINE_WIDTH, fill=None)
    inner = Rectangle((x, y), fat, height, linewidth=0, fill=None)
    ax.add_patch(inner)
    ax.add_patch(outline)
    # NOTE(review): frac is already scaled by GPU_HEIGHT inside
    # sanitize_utilizaton and is multiplied by height again here — confirm
    # the intended fill height.
    ax.fill_between([x, (x + fat)], y, (y + (height * frac)), hatch='\\', color=None)
def load_experiment(filename) -> Tuple[(Dict[(Any, Any)], Dict[(Any, Any)])]:
    """Load a saved experiment JSON file.

    Returns:
        (config, fit_res) — the 'config' and 'results' entries of the file.
    """
    with open(filename, 'r') as f:
        loaded = json.load(f)
    return (loaded['config'], loaded['results'])
def load_experiment_for_update(run_name, out_dir):
    """Load {out_dir}/{run_name}.json and return its (config, fit_res) pair."""
    path = f'{os.path.join(out_dir, run_name)}.json'
    with open(path, 'r') as f:
        loaded = json.load(f)
    return (loaded['config'], loaded['results'])
def save_experiment(run_name, out_dir, config, fit_res: Dict):
    """Serialize an experiment's config and results to {out_dir}/{run_name}.json.

    Args:
        run_name: base name of the output file (without the .json suffix).
        out_dir: target directory; created if it does not exist.
        config: JSON-serializable experiment configuration.
        fit_res: results as a dict, a namedtuple/NamedTuple instance,
            or a SimpleNamespace.

    Raises:
        Whatever json.dump raises for non-serializable output (re-raised
        after printing the offending payload for easier debugging).
    """
    # BUGFIX: the original `isinstance(fit_res, NamedTuple)` raises TypeError
    # on modern Python, where typing.NamedTuple is not a runtime type usable
    # with isinstance(). Detect namedtuple instances structurally instead.
    if isinstance(fit_res, tuple) and hasattr(fit_res, '_asdict'):
        fit_res = fit_res._asdict()
    elif isinstance(fit_res, SimpleNamespace):
        fit_res = fit_res.__dict__
    output = dict(config=config, results=fit_res)
    output_filename = f'{os.path.join(out_dir, run_name)}.json'
    os.makedirs(out_dir, exist_ok=True)
    with open(output_filename, 'w') as f:
        try:
            json.dump(output, f, indent=2)
        except Exception:
            # dump the payload so the non-serializable entry can be spotted
            print('-E- error saving experiment, printing for easier debug')
            print(('-' * 40))
            print(output)
            print(('-' * 40))
            raise
    print(f'*** Output file {output_filename} written')
class ArgsStasher():
    """Remembers original argument values that were replaced in-place, so they
    can be restored later.

    Used for naming and reproducibility conventions (as sometimes we change
    the args inplace).
    """
    STASH_NAME = '_tmp_stashed'

    @staticmethod
    def stash_to_args(args, replaced_key, old_value):
        """Record old_value under replaced_key (a str, or a list/tuple naming
        a nested attribute path). No-op unless args uses auto_file_name."""
        if not hasattr(args, 'auto_file_name'):
            return
        stash_attr = ArgsStasher.STASH_NAME
        key = tuple(replaced_key) if isinstance(replaced_key, list) else replaced_key
        if not hasattr(args, stash_attr):
            setattr(args, stash_attr, dict())
        getattr(args, stash_attr)[key] = old_value

    @staticmethod
    def reload_stashed_args(args):
        """Restore every stashed value onto args, then drop the stash."""
        stash_attr = ArgsStasher.STASH_NAME
        if not hasattr(args, stash_attr):
            return
        for key, old_value in getattr(args, stash_attr).items():
            if isinstance(key, tuple):
                # walk the nested attribute path down to the parent object
                target = args
                for part in key[:-1]:
                    target = getattr(target, part)
                leaf = key[-1]
            else:
                target, leaf = args, key
            assert isinstance(leaf, str)
            setattr(target, leaf, old_value)
        delattr(args, stash_attr)
def auto_file_name(args, verbose=True):
    """Append a configuration-describing suffix to args.out_filename.

    This is used to distinguish different configurations by file name:
    the suffix encodes model, dataset, algorithm variant, effective batch
    size, step_every and seed. Returns the updated args.out_filename.
    """
    # restore any values replaced in-place so the name reflects the original run
    ArgsStasher.reload_stashed_args(args)
    assert hasattr(args, 'auto_file_name')
    wp = args.weight_prediction['type'] if hasattr(args, 'weight_prediction') else 'stale'
    ws = 'ws_' if getattr(args, 'weight_stashing', False) else ''
    ga = 'ga_' if hasattr(args, 'gap_aware') else ''
    bs = f'bs_{(args.bs_train * args.step_every)}'   # effective batch size
    se = f'se_{args.step_every}'
    ga_just_for_loss = 'gaJFL_' if getattr(args, 'gap_aware_just_loss', False) else ''
    if 'gpipe' == args.work_scheduler.lower():
        suffix = f'{args.model}_{args.dataset}_gpipe_{bs}_{se}_seed_{args.seed}'
    else:
        suffix = f'{args.model}_{args.dataset}_{wp}_{ws}{ga}{bs}_{se}_{ga_just_for_loss}seed_{args.seed}'
    args.out_filename = f'{args.out_filename}_{suffix}'
    if verbose:
        print(f'Out File Name will be: {args.out_filename}')
    return args.out_filename
def set_style():
    """Configure seaborn/matplotlib for paper figures: serif fonts, white
    style, and Type 42 (TrueType) fonts so PDF/PS text stays editable."""
    sns.set_context('paper')
    sns.set(font='serif')
    serif_style = {'font.family': 'serif',
                   'font.serif': ['Times', 'Palatino', 'serif']}
    sns.set_style('white', serif_style)
    import matplotlib
    # embed editable TrueType fonts in vector output
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
def parse_all_eval_results_dict(fn):
    """Read a file containing a Python-literal dict (a repr dump) and return it."""
    with open(fn, 'r') as f:
        return ast.literal_eval(f.read())
def extract_values(d, subkey=None, verbose=False):
    """Flatten {checkpoint_idx: {metric: value}} into {checkpoint_idx + 1: value}.

    If subkey is None it is inferred, which requires every entry to hold
    exactly one metric name; otherwise ValueError lists the candidates.
    """
    if subkey is None:
        candidates = {metric for entry in d.values() for metric in entry.keys()}
        if len(candidates) == 1:
            subkey = next(iter(candidates))
        else:
            raise ValueError('please choose subkey from', candidates)
        if verbose:
            print(f'inferring subkey as {subkey}')
    # shift keys by +1: checkpoint 0 corresponds to epoch 1
    return {k + 1: d[k][subkey] for k in d}
def plot_epochs_vs_accuracy(*, gpipe_dict=None, stale_dict=None, acc_without_ft=None, title='super_glue_boolq_accuracy', ylabel=f'Accuracy'):
    """Plot accuracy-per-epoch curves for gpipe vs ours (stale).

    If acc_without_ft is given it is prepended as the epoch-0 point of both
    curves (accuracy before any fine-tuning).
    """
    (fix, ax) = plt.subplots()
    if acc_without_ft is None:
        gpipe_xy = (list(gpipe_dict.keys()), list(gpipe_dict.values()))
        stale_xy = (list(stale_dict.keys()), list(stale_dict.values()))
    else:
        gpipe_xy = (([0] + list(gpipe_dict.keys())),
                    ([acc_without_ft] + list(gpipe_dict.values())))
        stale_xy = (([0] + list(stale_dict.keys())),
                    ([acc_without_ft] + list(stale_dict.values())))
    ax.plot(*gpipe_xy, marker=GPIPE_MARKER, label='gpipe')
    ax.plot(*stale_xy, marker=STALE_MARKER, label='ours')
    ax.legend()
    ax.set_title(title)
    ax.set_xlabel(f'Epochs')
    ax.set_ylabel(ylabel)
    # open up the plot: hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.show()
def extract_cumsum_train_times(loaded, time_units='seconds'):
    """Cumulative train-epoch times for a loaded experiment, in time_units."""
    epoch_times = extract_train_epoch_times(loaded)
    return times_to_cumsum_and_units(time_units, epoch_times)
def extract_train_epoch_times(loaded):
    """Per-epoch training times from a loaded (config, fit_res) experiment pair.

    Note: the times live in the first element (the config) of the pair.
    """
    config = loaded[0]
    return config['train_epochs_times']
def times_to_cumsum_and_units(time_units, times):
    """Convert per-epoch times in seconds to cumulative times in time_units.

    time_units: 'seconds' | 'minutes' | 'hours' (case-insensitive).
    Returns a numpy array of running totals.
    """
    divisors = {'seconds': 1, 'minutes': 60, 'hours': 3600}
    divisor = divisors.get(time_units.lower())
    scaled = np.array(times) / divisor
    return np.cumsum(scaled)
def plot_time_vs_accuracy(*, gpipe_dict=None, stale_dict=None, times_gpipe=None, times_stale=None, time_units='hours', acc_without_ft=None, title='super_glue_boolq_accuracy', ylabel=f'Accuracy'):
    """Plot accuracy vs wall-clock time for gpipe vs ours (stale).

    times_gpipe / times_stale are cumulative times matching the dict values.
    If acc_without_ft is given it becomes the time-0 point of both curves.
    """
    (fix, ax) = plt.subplots()
    if acc_without_ft is None:
        gpipe_xy = (times_gpipe, list(gpipe_dict.values()))
        stale_xy = (times_stale, list(stale_dict.values()))
    else:
        gpipe_xy = (([0] + list(times_gpipe)),
                    ([acc_without_ft] + list(gpipe_dict.values())))
        stale_xy = (([0] + list(times_stale)),
                    ([acc_without_ft] + list(stale_dict.values())))
    ax.plot(*gpipe_xy, marker=GPIPE_MARKER, label='gpipe')
    ax.plot(*stale_xy, marker=STALE_MARKER, label='ours')
    ax.legend()
    ax.set_title(title)
    ax.set_xlabel(f'Time [{time_units}]')
    ax.set_ylabel(ylabel)
    plt.show()
def get_fixed_dict_and_times_single(exp_fn, checkpoints_eval_fn, checkpoint_every_x_epochs=1, epochs_in_last_checkpoint=None, time_units='hours', subkey=None):
    """Load one run's (accuracy dict, cumulative times) pair, fixing keys when
    checkpoints were taken less often than every epoch.

    Args:
        exp_fn: experiment JSON with per-epoch train times.
        checkpoints_eval_fn: text file with the {checkpoint: {metric: value}} dump.
        checkpoint_every_x_epochs: checkpoint interval in epochs.
        epochs_in_last_checkpoint: epochs covered by the final (possibly
            partial) checkpoint; inferred from the times when None.
        time_units: units for the cumulative times.
        subkey: metric name passed to extract_values.

    Returns:
        (checkpoints_dict, times_list) with dict keys in epochs.
    """
    times_list = extract_cumsum_train_times(load_experiment(exp_fn), time_units=time_units)
    checkpoints_dict = extract_values(parse_all_eval_results_dict(checkpoints_eval_fn), subkey=subkey)
    if (checkpoint_every_x_epochs > 1):
        # Re-key all but the last checkpoint: checkpoint k -> epoch k * interval.
        gpipe_dict_ = {(k * checkpoint_every_x_epochs): v for (k, v) in list(checkpoints_dict.items())[:(- 1)]}
        if (epochs_in_last_checkpoint is None):
            # The final checkpoint may cover a partial interval; infer its span
            # from the number of recorded epoch times.
            epochs_in_last_checkpoint = (len(times_list) % checkpoint_every_x_epochs)
            warnings.warn(f'plot_epochs_vs_accuracy may be inaccurate point for last epoch, inferring it: epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
            print(f'epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
        (k, v) = list(checkpoints_dict.items())[(- 1)]
        if (epochs_in_last_checkpoint == 0):
            # training length divides the interval exactly
            gpipe_dict_[((k * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        else:
            # partial final interval: last checkpoint lands mid-interval
            gpipe_dict_[(((k - 1) * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        # Keep only times at checkpoint boundaries, plus the final partial one.
        # NOTE(review): indices start at 0, i.e. the time after the FIRST epoch
        # of each interval, not the interval's end — confirm this is intended.
        times_gpipe_ = [times_list[i] for i in range(0, len(times_list), checkpoint_every_x_epochs)]
        if ((len(times_list) % checkpoint_every_x_epochs) > 0):
            times_gpipe_.append(times_list[(- 1)])
        times_list = times_gpipe_
        checkpoints_dict = gpipe_dict_
    return (checkpoints_dict, times_list)
def analyze_datars(times1, times2, values1, values2, colors=('red', 'navy')):
    """Annotate two curves with the times at which each reaches 40% and 100%
    of its total improvement (relative to its first value).

    Labels are placed with adjustText so they avoid existing annotations
    and legends on the current axes.
    """
    from adjustText import adjust_text
    all_ts = []
    all_times = [*times1, *times2]
    all_vals = [*values1, *values2]
    for times, values, color in zip([times1, times2], [values1, values2], colors):
        peak = np.max(values)      # best value reached
        base = values[0]           # starting value
        percs = [0.4, 1]
        percs_nice = [str(int(p * 100)) + '%' for p in percs]
        values = np.asarray(values)
        times = np.asarray(times)
        # first index where the curve crosses each improvement fraction
        ids = [np.argmax(values >= (p * (peak - base) + base)) for p in percs]
        points = [(times[i], values[i], label) for i, label in zip(ids, percs_nice)]
        ts = [plt.text(*point, color=color) for point in points]
        all_ts.extend(ts)
        ax = plt.gca()
        # keep labels clear of annotations and legends already on the axes
        annotations = [child for child in ax.get_children()
                       if (isinstance(child, matplotlib.text.Annotation)
                           or isinstance(child, matplotlib.legend.Legend))]
        adjust_text(ts, x=all_times, y=all_vals, add_objects=annotations,
                    arrowprops=dict(arrowstyle='->', fill=True, color=color))
def epoch_speedup_dict(exp_gpipe_fn, exp_stale_fn):
    """Per-epoch cumulative-time speedup of stale over gpipe, from two
    experiment JSON files."""
    cumsum_gpipe = extract_cumsum_train_times(load_experiment(exp_gpipe_fn))
    cumsum_stale = extract_cumsum_train_times(load_experiment(exp_stale_fn))
    return epoch_speedup_dict_from_cumsum_times(cumsum_gpipe, cumsum_stale)
def epoch_speedup_dict_from_cumsum_times(times_gpipe, times_stale):
    """Map epoch index -> times_gpipe[i] / times_stale[i].

    Lengths are expected to match. As a tolerated mismatch (with a warning),
    one extra gpipe entry or two extra stale entries are trimmed; any other
    mismatch re-raises the AssertionError.
    """
    try:
        assert (len(times_gpipe) == len(times_stale)), str((len(times_gpipe), len(times_stale)))
    except AssertionError as e:
        diff = len(times_gpipe) - len(times_stale)
        if diff == 1:
            warnings.warn('allowing 1 difference')
            times_gpipe = times_gpipe[:-1]
        elif diff == -2:
            warnings.warn('allowing 2 difference')
            times_stale = times_stale[:-2]
        else:
            raise e
    return {i: times_gpipe[i] / times_stale[i] for i in range(len(times_stale))}
def epoch_speedup_from_cumsum_times(*args, idx=(- 1), **kwargs):
    """Speedup at a single epoch (default: the last one)."""
    per_epoch = epoch_speedup_dict_from_cumsum_times(*args, **kwargs)
    return list(per_epoch.values())[idx]
def epoch_speedup(*args, idx=(- 1), **kwargs):
    """Speedup at a single epoch (default: last), computed from result files."""
    per_epoch = epoch_speedup_dict(*args, **kwargs)
    return list(per_epoch.values())[idx]
def dump_all_raw_data(exp_stale_fn, exp_gpipe_fn, gpipe_fn, stale_fn, acc_without_ft=None):
    """Print all raw data used for analysis.

    The rest are calculations on this data.
    """
    print('-I- dump_all_raw_data')
    # checkpoint evaluation dicts
    print(parse_all_eval_results_dict(gpipe_fn))
    print(parse_all_eval_results_dict(stale_fn))
    # per-epoch training times
    for exp_fn in (exp_gpipe_fn, exp_stale_fn):
        print(load_experiment(exp_fn)[0]['train_epochs_times'])
    if acc_without_ft is not None:
        print('result_without_fine_tuning:', acc_without_ft)
def time_to_best_result(gpipe_dict, stale_dict, times_gpipe, times_stale, slow_alg_name='gpipe', fast_alg_name='stale'):
    """Print each algorithm's best result, the epoch it occurred, and the time
    taken to reach it, then the slow/fast time-to-best speedup."""
    records = []
    best_times = []
    for name, metric_dict, times in ((slow_alg_name, gpipe_dict, times_gpipe),
                                     (fast_alg_name, stale_dict, times_stale)):
        values = list(metric_dict.values())
        best_idx = np.argmax(values)
        best_time = times[best_idx]
        best_times.append(best_time)
        records.append({'alg': name,
                        'best_result': np.max(values),
                        'best_result_epoch': list(metric_dict.keys())[int(best_idx)],
                        'time': best_time})
    df = pd.DataFrame.from_records(records)
    print(df)
    speedup_to_best = best_times[0] / best_times[1]
    print('speedup_to_best_result:', speedup_to_best)
def compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale, skip_gpipe_seq=False):
    """Report time-to-best and last-epoch speedups for every interesting
    pairing of sequential vs mixed-pipe runs.

    With skip_gpipe_seq=True the sequential-gpipe comparisons are omitted
    (used when that baseline is unavailable, e.g. seq_gpipe_* are None).
    """
    def _compare(slow_dict, fast_dict, slow_times, fast_times, slow_name, fast_name):
        # one comparison = best-result table + last-epoch speedup
        time_to_best_result(slow_dict, fast_dict, slow_times, fast_times,
                            slow_alg_name=slow_name, fast_alg_name=fast_name)
        print('epoch_speedup', epoch_speedup_from_cumsum_times(slow_times, fast_times))

    if not skip_gpipe_seq:
        _compare(seq_gpipe_dict, virtual_stale_dict, seq_gpipe_times,
                 virtual_times_stale, 'gpipe_seq', 'stale_mixed')
        _compare(seq_gpipe_dict, virtual_gpipe_dict, seq_gpipe_times,
                 virtual_times_gpipe, 'gpipe_seq', 'gpipe_mixed')
    _compare(virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe,
             virtual_times_stale, 'gpipe_mixed', 'stale_mixed')
    _compare(seq_stale_dict, virtual_stale_dict, seq_stale_times,
             virtual_times_stale, 'stale_seq', 'stale_mixed')
class MultiRC():
    """Speedup analysis for SuperGLUE MultiRC (T5-3B, 8 partitions).

    Pure configuration: file paths of the four runs (sequential/mixed x
    gpipe/stale) fed into compute_all_speedups.
    """

    @staticmethod
    def all_speedups_multirc():
        # metric extracted from each checkpoint evaluation
        subkey = 'eval/super_glue_multirc_v102/f1'
        # sequential GPipe baseline is reconstructed via Hack (see that class)
        (seq_gpipe_dict, seq_gpipe_times) = Hack.get_multirc_seq_hack_gpipe_times_and_dict(subkey=subkey)
        # --- sequential stale run ---
        exp_results_dir = 'results/t5/super_glue/multirc/'
        seq_exp_stale_fn = os.path.join(exp_results_dir, 'no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_8_se_4_seed_42.json')
        seq_stale_fn = 'results/all_results_no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_8_se_4_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_8_se_4_seed_42.txt'
        (seq_stale_dict, seq_stale_times) = get_fixed_dict_and_times_single(exp_fn=seq_exp_stale_fn, checkpoints_eval_fn=seq_stale_fn, subkey=subkey)
        # --- mixed-pipe (virtual stages) runs ---
        exp_results_dir = 'results/t5/super_glue/multirc/'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_8_se_2_seed_42.json')
        exp_gpipe_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_8_se_8_seed_42.json')
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_8_se_8_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_8_se_8_seed_42.txt'
        stale_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_8_se_2_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_8_se_2_seed_42.txt'
        (virtual_gpipe_dict, virtual_times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn, subkey=subkey)
        (virtual_stale_dict, virtual_times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn, subkey=subkey)
        compute_all_speedups(seq_gpipe_dict,
                             seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale)
class WIC():
    """Speedup analysis for SuperGLUE WiC (T5-3B, 8 partitions).

    No sequential-gpipe baseline exists for this task, so
    compute_all_speedups is called with skip_gpipe_seq=True.
    """

    @staticmethod
    def all_speedups_wic():
        # checkpoints were taken every 100 steps; 5427 // 128 is presumably
        # steps-per-epoch (train-set size / batch size) — TODO confirm
        checkpoint_every_x_epochs = (100 // (5427 // 128))
        # --- sequential stale run ---
        seq_stale_fn = 'results/all_results_no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_128_se_4_seed_42_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_128_se_4_seed_42.txt'
        seq_exp_stale_fn = os.path.join('results/t5/super_glue/wic', 'no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_128_se_4_seed_42.json')
        (seq_stale_dict, seq_stale_times) = get_fixed_dict_and_times_single(exp_fn=seq_exp_stale_fn, checkpoints_eval_fn=seq_stale_fn, checkpoint_every_x_epochs=checkpoint_every_x_epochs)
        # --- mixed-pipe (virtual stages) runs ---
        exp_results_dir = 'results/t5/super_glue/wic/'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_128_se_2_seed_42.json')
        exp_gpipe_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_128_se_8_seed_42.json')
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_128_se_8_seed_42_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_128_se_8_seed_42.txt'
        stale_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_128_se_2_seed_42_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_128_se_2_seed_42.txt'
        (virtual_gpipe_dict, virtual_times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn, checkpoint_every_x_epochs=checkpoint_every_x_epochs)
        (virtual_stale_dict, virtual_times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn, checkpoint_every_x_epochs=checkpoint_every_x_epochs)
        # no sequential gpipe baseline for WiC
        seq_gpipe_dict = None
        seq_gpipe_times = None
        compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale, skip_gpipe_seq=True)
class BoolQ():
    """Speedup analysis for SuperGLUE BoolQ (T5-3B, 8 partitions).

    Pure configuration: file paths of the four runs fed into
    compute_all_speedups. The sequential gpipe baseline comes from Hack.
    """

    @staticmethod
    def all_speedups_boolq():
        # sequential GPipe baseline reconstructed via Hack (see that class)
        (seq_gpipe_dict, seq_gpipe_times) = Hack.get_boolq_seq_hack_gpipe_times_and_dict()
        # --- sequential stale run ---
        seq_stale_fn = 'results/FOR_PAPER/all_results_new_t5_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42.txt'
        seq_exp_stale_fn = os.path.join('results/t5/super_glue/boolq', 'new_t5_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42.json')
        (seq_stale_dict, seq_stale_times) = get_fixed_dict_and_times_single(exp_fn=seq_exp_stale_fn, checkpoints_eval_fn=seq_stale_fn)
        # --- mixed-pipe (virtual stages) runs ---
        exp_results_dir = 'results/t5/super_glue/boolq/'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42.json')
        exp_gpipe_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42.json')
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42.txt'
        stale_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42.txt'
        (virtual_gpipe_dict, virtual_times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn)
        (virtual_stale_dict, virtual_times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn)
        compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale)
class RTE():
    """Speedup analysis for GLUE RTE (T5-3B, 8 partitions).

    Pure configuration: file paths of the four runs fed into
    compute_all_speedups. The sequential gpipe baseline comes from Hack.
    Note the mixed-stale and mixed-gpipe runs live under different results
    directories (results_b4_20_5_changes vs results_new_t5).
    """

    @staticmethod
    def all_speedups_rte():
        # sequential GPipe baseline reconstructed via Hack (see that class)
        (seq_gpipe_dict, seq_gpipe_times) = Hack.get_rte_seq_hack_gpipe_times_and_dict()
        # --- sequential stale run ---
        seq_stale_fn = 'results/FOR_PAPER/all_results_glue_rte_12_epochs_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_40_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_40_se_10_seed_42.txt'
        seq_exp_stale_fn = os.path.join('results/t5/glue/rte', 'glue_rte_12_epochs_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_40_se_10_seed_42.json')
        (seq_stale_dict, seq_stale_times) = get_fixed_dict_and_times_single(exp_fn=seq_exp_stale_fn, checkpoints_eval_fn=seq_stale_fn)
        # --- mixed-pipe (virtual stages) runs ---
        exp_results_dir = 'results_b4_20_5_changes/t5/glue/rte'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_rte_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42.json')
        exp_results_dir = 'results_new_t5/t5/glue/rte'
        exp_gpipe_fn = os.path.join(exp_results_dir, 'rte_virtual_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_40_se_10_seed_42.json')
        gpipe_fn = 'results_new_t5/all_results_rte_virtual_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_40_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_40_se_10_seed_42.txt'
        stale_fn = 'results/FOR_PAPER/all_results_new_args_rte_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42.txt'
        (virtual_gpipe_dict, virtual_times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn)
        (virtual_stale_dict, virtual_times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn)
        compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict,
                             virtual_stale_dict, virtual_times_gpipe, virtual_times_stale)
class Hack():
    """Reconstructions of sequential-GPipe timing/accuracy series for runs
    whose sequential accuracy logs are missing: per-epoch times measured (or
    hard-coded) for the sequential run are grafted onto the mixed-pipe
    (virtual-stages) checkpoint accuracies via extrapolate_gpipe_times_and_acc_seq.
    """

    @staticmethod
    def get_rte_seq_hack_gpipe_times_and_dict():
        # RTE: sequential per-epoch times are hard-coded below (seconds,
        # presumably measured on a separate run — TODO confirm).
        exp_gpipe_fn = 'results_new_t5/t5/glue/rte/glue_rte_12_epochs_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream_t5_tfds_gpipe_bs_40_se_10_seed_42.json'
        gpipe_fn = 'results_new_t5/all_results_rte_virtual_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_40_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_40_se_10_seed_42.txt'
        d = {'train_epochs_times': [397.09888553619385, 401.3545401096344, 400.58319187164307, 401.8276650905609, 400.83459973335266, 400.93730449676514]}
        (gpipe_dict, times_gpipe) = Hack.extrapolate_gpipe_times_and_acc_seq(d, exp_gpipe_fn, gpipe_fn)
        return (gpipe_dict, times_gpipe)

    @staticmethod
    def get_boolq_seq_hack_gpipe_times_and_dict():
        # BoolQ: sequential per-epoch times come straight from the experiment file.
        exp_results_dir = 'results/t5/super_glue/boolq/'
        exp_gpipe_fn = os.path.join(exp_results_dir, 'new_t5_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_gpipe_bs_20_se_10_seed_42.json')
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42.txt'
        d = {'train_epochs_times': extract_train_epoch_times(load_experiment(exp_gpipe_fn))}
        (gpipe_dict, times_gpipe) = Hack.extrapolate_gpipe_times_and_acc_seq(d, exp_gpipe_fn, gpipe_fn)
        return (gpipe_dict, times_gpipe)

    @staticmethod
    def get_wic_seq_hack_gpipe_times_and_dict():
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_128_se_8_seed_42_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_128_se_8_seed_42.txt'
        exp_gpipe_fn = 'results/t5/super_glue/wic/no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream_t5_tfds_gpipe_bs_128_se_8_seed_42.json'
        d = {'train_epochs_times':
             extract_train_epoch_times(load_experiment(exp_gpipe_fn))}
        # checkpoints every 500 steps; 5427 // 128 is presumably steps-per-epoch
        # (train-set size / batch size) — TODO confirm
        checkpoint_every_x_epochs = (500 // (5427 // 128))
        (gpipe_dict, times_gpipe) = Hack.extrapolate_gpipe_times_and_acc_seq(d, exp_gpipe_fn, gpipe_fn, checkpoint_every_x_epochs=checkpoint_every_x_epochs)
        return (gpipe_dict, times_gpipe)

    @staticmethod
    def get_multirc_seq_hack_gpipe_times_and_dict(subkey='eval/super_glue_multirc_v102/f1'):
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_8_se_8_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_8_se_8_seed_42.txt'
        exp_gpipe_fn = 'results/t5/super_glue/multirc/no_virtual_stages_benchmark_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_gpipe_bs_8_se_8_seed_42.json'
        d = {'train_epochs_times': extract_train_epoch_times(load_experiment(exp_gpipe_fn))}
        (gpipe_dict, times_gpipe) = Hack.extrapolate_gpipe_times_and_acc_seq(d, exp_gpipe_fn, gpipe_fn, subkey=subkey)
        return (gpipe_dict, times_gpipe)

    @staticmethod
    def extrapolate_gpipe_times_and_acc_seq(d, exp_gpipe_fn, gpipe_fn, checkpoint_every_x_epochs=1, epochs_in_last_checkpoint=None, subkey=None):
        """Combine sequential per-epoch times (d['train_epochs_times']) with
        the mixed-pipe run's checkpoint accuracies, extrapolating missing
        sequential epochs, and return (accuracy_dict, cumsum_times_in_hours)."""
        gpipe_virtual_train_epoch_times = extract_train_epoch_times(load_experiment(exp_gpipe_fn))
        (gpipe_dict, times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn, checkpoint_every_x_epochs=checkpoint_every_x_epochs, epochs_in_last_checkpoint=epochs_in_last_checkpoint, subkey=subkey)
        times_gpipe_ = d['train_epochs_times']
        assert (len(times_gpipe_) == len(times_gpipe)), (len(times_gpipe_), len(times_gpipe))
        # extrapolate missing sequential epochs by resampling measured times
        while (len(times_gpipe_) < (len(gpipe_dict) - 1)):
            times_gpipe_.append(random.choice(d['train_epochs_times']))
        # scale factor: sequential vs virtual mean epoch time
        factor = (np.mean(d['train_epochs_times']) / np.mean(gpipe_virtual_train_epoch_times[:len(d['train_epochs_times'])]))
        # NOTE(review): times_gpipe[-1] is a cumulative value in hours while
        # times_gpipe_ holds per-epoch seconds — confirm this mix is intended.
        times_gpipe_.append((factor * times_gpipe[(- 1)]))
        times_gpipe = times_to_cumsum_and_units(time_units='hours', times=times_gpipe_)
        return (gpipe_dict, times_gpipe)
class AnnotationPlotsRTE():
    """Paper plots for RTE: sequential GPipe vs mixed-pipe FTPipe (stale),
    as epochs-vs-accuracy and time-to-accuracy figures with annotated
    40%/100% improvement points.
    """

    @staticmethod
    def winning_RTE_seq_gpipe_vs_MIXED_stale():
        # gather both runs' accuracy/time series, print speedups, then plot
        set_style()
        (gpipe_dict, times_gpipe) = Hack.get_rte_seq_hack_gpipe_times_and_dict()
        exp_results_dir = 'results_b4_20_5_changes/t5/glue/rte'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_rte_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42.json')
        stale_fn = 'results_b4_20_5_changes/all_results_new_args_rte_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42_layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_40_se_5_seed_42.txt'
        # accuracy before any fine-tuning (epoch-0 point of both curves)
        acc_without_ft = 87.72563176895306
        (stale_dict, times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn)
        higher = times_gpipe
        lower = times_stale
        print('epoch_speedup', epoch_speedup_from_cumsum_times(higher, lower))
        time_to_best_result(gpipe_dict, stale_dict, times_gpipe, times_stale)
        AnnotationPlotsRTE.winning_epochs(acc_without_ft, gpipe_dict, stale_dict)
        AnnotationPlotsRTE.winning_tta(acc_without_ft, gpipe_dict, stale_dict, times_gpipe, times_stale)

    @staticmethod
    def winning_epochs(acc_without_ft, gpipe_dict, stale_dict, dirname='results/paper_plots/', pdfname='new_Final_Plot_winning_RTE_seq_gpipe_vs_MIXED_stale_EPOCHS.pdf'):
        # NOTE(review): width/height look like module-level globals — confirm
        # they are defined elsewhere in this file.
        (fig, ax) = plt.subplots(figsize=(width, height))
        ax.plot(([0] + list(gpipe_dict.keys())), ([acc_without_ft] + list(gpipe_dict.values())), marker='^', label='GPipe', color='navy')
        ax.plot(([0] + list(stale_dict.keys())), ([acc_without_ft] + list(stale_dict.values())), marker='o', label='FTPipe', color='red')
        ax.set_ylim(86, 92)
        ax.set_xlabel(f'Epochs')
        ax.set_ylabel(f'Accuracy')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        # annotate 40%/100% improvement points on both curves
        analyze_datars(([0] + list(stale_dict.keys())), ([0] + list(gpipe_dict.keys())), ([acc_without_ft] + list(stale_dict.values())), ([acc_without_ft] + list(gpipe_dict.values())))
        ax.legend(frameon=False, loc='best',
                  borderaxespad=0)
        os.makedirs(dirname, exist_ok=True)
        fullpdfname = str(os.path.join(dirname, pdfname))
        plt.savefig(fullpdfname, transparent=False)
        plt.show()

    @staticmethod
    def winning_tta(acc_without_ft, gpipe_dict, stale_dict, times_gpipe, times_stale, dirname='results/paper_plots/', pdfname='new_Final_Plot_winning_RTE_seq_gpipe_vs_MIXED_stale_TTA.pdf'):
        # time-to-accuracy version of the plot above (x-axis in hours)
        (fix, ax) = plt.subplots(figsize=(width, height))
        ax.plot(([0] + list(times_gpipe)), ([acc_without_ft] + list(gpipe_dict.values())), marker='^', label='GPipe', color='navy')
        ax.plot(([0] + list(times_stale)), ([acc_without_ft] + list(stale_dict.values())), marker='o', label='FTPipe', color='red')
        ax.set_ylim(86, 92)
        ax.set_xlabel(f'Time (Hours)')
        ax.set_ylabel(f'Accuracy')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        # annotate 40%/100% improvement points on both curves
        analyze_datars(([0] + list(times_stale)), ([0] + list(times_gpipe)), ([acc_without_ft] + list(stale_dict.values())), ([acc_without_ft] + list(gpipe_dict.values())))
        ax.legend(frameon=False, loc='best', borderaxespad=0)
        os.makedirs(dirname, exist_ok=True)
        fullpdfname = str(os.path.join(dirname, pdfname))
        plt.savefig(fullpdfname, transparent=False)
        plt.show()
def set_style():
    """Configure seaborn for paper-style figures with serif fonts.

    NOTE(review): this function is re-defined several times in this file;
    the last definition wins at import time.
    """
    sns.set_context('paper')
    sns.set(font='serif')
    sns.set_style('white', {'font.family': 'serif', 'font.serif': ['Times', 'Palatino', 'serif']})
def parse_all_eval_results_dict(fn):
    """Read a file containing a Python-literal dict and return the dict.

    Uses ast.literal_eval, so only literals are accepted (no code execution).
    """
    with open(fn, 'r') as handle:
        contents = handle.read()
    return ast.literal_eval(contents)
def extract_values(d, subkey=None, verbose=False):
    """Flatten {epoch: {subkey: value}} into {epoch + 1: value}.

    When *subkey* is None it is inferred, but only if every inner dict uses a
    single common key; otherwise a ValueError lists the candidates.
    Keys are shifted by one so the result is 1-based.
    """
    if subkey is None:
        candidates = {inner_key for inner in d.values() for inner_key in inner.keys()}
        if len(candidates) != 1:
            raise ValueError('please choose subkey from', candidates)
        subkey = next(iter(candidates))
        if verbose:
            print(f'inferring subkey as {subkey}')
    return {k + 1: d[k][subkey] for k in d}
def plot_epochs_vs_accuracy(*, gpipe_dict=None, stale_dict=None, acc_without_ft=None, title='super_glue_boolq_accuracy', ylabel=f'Accuracy'):
    """Plot accuracy-per-epoch curves for gpipe vs ours ("stale").

    gpipe_dict / stale_dict map epoch -> accuracy.  When *acc_without_ft* is
    given it is prepended as the epoch-0 point on both curves.  Relies on
    module-level GPIPE_MARKER / STALE_MARKER.  Shows the figure; returns None.
    """
    (fix, ax) = plt.subplots()
    if (acc_without_ft is None):
        ax.plot(list(gpipe_dict.keys()), list(gpipe_dict.values()), marker=GPIPE_MARKER, label='gpipe')
        ax.plot(list(stale_dict.keys()), list(stale_dict.values()), marker=STALE_MARKER, label='ours')
    else:
        # Anchor both curves at x=0 with the accuracy before fine-tuning.
        ax.plot(([0] + list(gpipe_dict.keys())), ([acc_without_ft] + list(gpipe_dict.values())), marker=GPIPE_MARKER, label='gpipe')
        ax.plot(([0] + list(stale_dict.keys())), ([acc_without_ft] + list(stale_dict.values())), marker=STALE_MARKER, label='ours')
    ax.legend()
    ax.set_title(title)
    ax.set_xlabel(f'Epochs')
    ax.set_ylabel(ylabel)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.show()
def extract_cumsum_train_times(loaded, time_units='seconds'):
    """Return cumulative per-epoch train times of a loaded experiment, in *time_units*."""
    epoch_times = extract_train_epoch_times(loaded)
    return times_to_cumsum_and_units(time_units, epoch_times)
def extract_train_epoch_times(loaded):
    """Pull the per-epoch training-times list out of a loaded experiment record."""
    first_record = loaded[0]
    return first_record['train_epochs_times']
def times_to_cumsum_and_units(time_units, times):
    """Convert per-epoch durations (seconds) to a cumulative array in *time_units*.

    Args:
        time_units: one of 'seconds', 'minutes', 'hours' (case-insensitive).
        times: sequence of per-epoch durations, in seconds.

    Returns:
        np.ndarray of cumulative times in the requested units.

    Raises:
        ValueError: for an unsupported unit.  (The original code did
            ``dict.get`` and later failed with an opaque TypeError on
            division by None.)
    """
    time_div_factor = {'seconds': 1, 'minutes': 60, 'hours': 3600}
    factor = time_div_factor.get(time_units.lower())
    if factor is None:
        raise ValueError(f'unsupported time_units: {time_units!r}; choose from {sorted(time_div_factor)}')
    times = (np.array(times) / factor)
    return np.cumsum(times)
def plot_time_vs_accuracy(*, gpipe_dict=None, stale_dict=None, times_gpipe=None, times_stale=None, time_units='hours', acc_without_ft=None, title='super_glue_boolq_accuracy', ylabel=f'Accuracy'):
    """Plot accuracy against cumulative wall-clock time for gpipe vs ours.

    times_gpipe / times_stale are cumulative times (see
    extract_cumsum_train_times); *time_units* is only used for the x label.
    When *acc_without_ft* is given it is prepended as the t=0 point.
    Shows the figure; returns None.
    """
    (fix, ax) = plt.subplots()
    if (acc_without_ft is None):
        ax.plot(times_gpipe, list(gpipe_dict.values()), marker=GPIPE_MARKER, label='gpipe')
        ax.plot(times_stale, list(stale_dict.values()), marker=STALE_MARKER, label='ours')
    else:
        # Anchor both curves at t=0 with the accuracy before fine-tuning.
        ax.plot(([0] + list(times_gpipe)), ([acc_without_ft] + list(gpipe_dict.values())), marker=GPIPE_MARKER, label='gpipe')
        ax.plot(([0] + list(times_stale)), ([acc_without_ft] + list(stale_dict.values())), marker=STALE_MARKER, label='ours')
    ax.legend()
    ax.set_title(title)
    ax.set_xlabel(f'Time [{time_units}]')
    ax.set_ylabel(ylabel)
    plt.show()
def get_fixed_dict_and_times_single(exp_fn, checkpoints_eval_fn, checkpoint_every_x_epochs=1, epochs_in_last_checkpoint=None, time_units='hours', subkey=None):
    """Load one run's {epoch: metric} dict and its cumulative train times.

    When checkpoints were saved every x > 1 epochs, re-index the checkpoint
    dict to epoch numbers and subsample the times list so both align.
    *epochs_in_last_checkpoint* may be given explicitly; otherwise it is
    inferred as len(times) % x (with a warning, since the inference can be
    wrong, e.g. when training stopped exactly on a checkpoint boundary).

    Returns:
        (checkpoints_dict, times_list)
    """
    times_list = extract_cumsum_train_times(load_experiment(exp_fn), time_units=time_units)
    checkpoints_dict = extract_values(parse_all_eval_results_dict(checkpoints_eval_fn), subkey=subkey)
    if (checkpoint_every_x_epochs > 1):
        # Re-key all but the last checkpoint: checkpoint k covers epoch k*x.
        gpipe_dict_ = {(k * checkpoint_every_x_epochs): v for (k, v) in list(checkpoints_dict.items())[:(- 1)]}
        if (epochs_in_last_checkpoint is None):
            epochs_in_last_checkpoint = (len(times_list) % checkpoint_every_x_epochs)
            warnings.warn(f'plot_epochs_vs_accuracy may be inaccurate point for last epoch, infering it: epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
            print(f'epochs_in_last_checkpoint={epochs_in_last_checkpoint}')
        # Place the last (possibly partial) checkpoint at its true epoch number.
        (k, v) = list(checkpoints_dict.items())[(- 1)]
        if (epochs_in_last_checkpoint == 0):
            gpipe_dict_[((k * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        else:
            gpipe_dict_[(((k - 1) * checkpoint_every_x_epochs) + epochs_in_last_checkpoint)] = v
        # Subsample the cumulative times to one entry per checkpoint
        # (NOTE(review): verify the start index aligns times with checkpoints).
        times_gpipe_ = [times_list[i] for i in range(0, len(times_list), checkpoint_every_x_epochs)]
        if ((len(times_list) % checkpoint_every_x_epochs) > 0):
            # A trailing partial interval gets the final cumulative time.
            times_gpipe_.append(times_list[(- 1)])
        times_list = times_gpipe_
        checkpoints_dict = gpipe_dict_
    return (checkpoints_dict, times_list)
def analyze_datars(times1, times2, values1, values2, colors=('red', 'navy')):
    """Annotate the current matplotlib axes with 75% / 100% progress labels.

    For each (times, values) series, progress is measured from the first value
    (the baseline) to the series maximum; a text label is placed at the first
    point reaching 75% and 100% of that range, then adjustText pushes labels
    apart so they do not overlap existing annotations/legends.

    Fixes vs. the original: the mutable default ``colors=['red', 'navy']`` is
    now an immutable tuple; the locals that shadowed builtins ``max``/``min``
    are renamed; the dead ``all_ts`` accumulator and trailing
    ``arrow_props = {}`` assignment are removed.  Behavior is otherwise
    unchanged.
    """
    from adjustText import adjust_text
    all_times = [*times1, *times2]
    all_vals = [*values1, *values2]
    for (times, values, color) in zip([times1, times2], [values1, values2], colors):
        top = np.max(values)
        base = values[0]
        percs = [0.75, 1]
        values = np.asarray(values)
        times = np.asarray(times)
        # Index of the first point reaching each progress threshold.
        ids = [np.argmax((values >= ((x * (top - base)) + base))) for x in percs]
        percs_nice = ['75%', '100%']
        points = [(times[i], values[i], pn) for (i, pn) in zip(ids, percs_nice)]
        ts = [plt.text(*a, color=color) for a in points]
        ax = plt.gca()
        # Existing annotations/legends are obstacles the labels must avoid.
        annotations = [child for child in ax.get_children() if isinstance(child, (matplotlib.text.Annotation, matplotlib.legend.Legend))]
        adjust_text(ts, x=all_times, y=all_vals, add_objects=annotations, arrowprops=dict(arrowstyle='->', fill=True, color=color))
def epoch_speedup_dict(exp_gpipe_fn, exp_stale_fn):
    """Per-epoch speedup of stale over gpipe (cumulative-time ratio), keyed by epoch."""
    cumsum_gpipe = extract_cumsum_train_times(load_experiment(exp_gpipe_fn))
    cumsum_stale = extract_cumsum_train_times(load_experiment(exp_stale_fn))
    return epoch_speedup_dict_from_cumsum_times(cumsum_gpipe, cumsum_stale)
def epoch_speedup_dict_from_cumsum_times(times_gpipe, times_stale):
    """Map epoch index -> gpipe/stale cumulative-time ratio (>1 means stale is faster)."""
    assert (len(times_gpipe) == len(times_stale))
    return {epoch: gpipe_t / stale_t
            for epoch, (gpipe_t, stale_t) in enumerate(zip(times_gpipe, times_stale))}
def epoch_speedup_from_cumsum_times(*args, idx=(- 1), **kwargs):
    """Single speedup value at position *idx* (default: last epoch)."""
    speedups = epoch_speedup_dict_from_cumsum_times(*args, **kwargs)
    return list(speedups.values())[idx]
def epoch_speedup(*args, idx=(- 1), **kwargs):
    """Single speedup value loaded from experiment files, at position *idx* (default: last)."""
    speedups = epoch_speedup_dict(*args, **kwargs)
    return list(speedups.values())[idx]
def dump_all_raw_data(exp_stale_fn, exp_gpipe_fn, gpipe_fn, stale_fn, acc_without_ft=None):
    """Print all raw data used for the analysis.

    Everything else in this module is a calculation over this data:
    both checkpoint-eval dicts, both runs' per-epoch train times, and
    (optionally) the accuracy before fine-tuning.
    """
    print('-I- dump_all_raw_data')
    print(parse_all_eval_results_dict(gpipe_fn))
    print(parse_all_eval_results_dict(stale_fn))
    print(load_experiment(exp_gpipe_fn)[0]['train_epochs_times'])
    print(load_experiment(exp_stale_fn)[0]['train_epochs_times'])
    if (acc_without_ft is not None):
        print('result_without_fine_tuning:', acc_without_ft)
def time_to_best_result(gpipe_dict, stale_dict, times_gpipe, times_stale, slow_alg_name='gpipe', fast_alg_name='stale'):
    """Print each algorithm's best result, its epoch/time, and the time-to-best speedup.

    Both dicts map epoch -> metric; the times are cumulative per-epoch times
    aligned with the dict entries.  Prints a small DataFrame; returns None.
    """
    records = []
    best_time_by_alg = {}
    for alg_name, acc_dict, cum_times in ((slow_alg_name, gpipe_dict, times_gpipe),
                                          (fast_alg_name, stale_dict, times_stale)):
        accs = list(acc_dict.values())
        best_idx = int(np.argmax(accs))
        best_time_by_alg[alg_name] = cum_times[best_idx]
        records.append({'alg': alg_name,
                        'best_result': np.max(accs),
                        'best_result_epoch': list(acc_dict.keys())[best_idx],
                        'time': cum_times[best_idx]})
    print(pd.DataFrame.from_records(records))
    speedup_to_best = (best_time_by_alg[slow_alg_name] / best_time_by_alg[fast_alg_name])
    print('speedup_to_best_result:', speedup_to_best)
def set_style():
    """Seaborn 'paper' style with serif fonts; fonttype 42 embeds TrueType
    fonts so text in saved PDF/PS figures stays selectable/editable.

    NOTE(review): this function is defined multiple times in this file;
    the last definition wins at import time.
    """
    sns.set_context('paper')
    sns.set(font='serif')
    sns.set_style('white', {'font.family': 'serif', 'font.serif': ['Times', 'Palatino', 'serif']})
    import matplotlib
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
def set_style():
    """Seaborn 'paper' style with serif fonts; fonttype 42 embeds TrueType
    fonts in PDF/PS output.

    NOTE(review): duplicate of an earlier definition in this file.
    """
    sns.set_context('paper')
    sns.set(font='serif')
    sns.set_style('white', {'font.family': 'serif', 'font.serif': ['Times', 'Palatino', 'serif']})
    import matplotlib
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
def set_style():
    """Seaborn 'paper' style with serif fonts; fonttype 42 embeds TrueType
    fonts in PDF/PS output.

    NOTE(review): duplicate of earlier definitions in this file.
    """
    sns.set_context('paper')
    sns.set(font='serif')
    sns.set_style('white', {'font.family': 'serif', 'font.serif': ['Times', 'Palatino', 'serif']})
    import matplotlib
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
def parse_distributed_cli(parser):
    """Register distributed-execution flags on *parser* (torch.distributed.launch style)."""
    add = parser.add_argument
    add('--rank', default=None, type=int,
        help='Rank of worker, given by torch.distributed.launch, overridden otherwise')
    add('--local_rank', default=0, type=int,
        help='Local rank of worker, given by torch.distributed.launch, overridden otherwise')
    add('--distributed_backend', choices=['gloo', 'nccl', 'mpi'], default='mpi', type=str,
        help='distributed backend to use, given by torch.distributed.launch, overridden otherwise')
    add('--nnodes', default=1, type=int, help='number of nodes')
    add('--max_buffers', type=int, default=1,
        help='Maximal Number of async recv buffers. With 1: it actually means the recv is sync.')
    add('--keep_buffers_alive', action='store_true', default=False,
        help='Keep forward buffers for both train and eval instead of dynamically creating them every iteration')
    add('--flush_rate', type=int, default=(-1),
        help='Flush the pipeline after flush_rate batches (default: -1, flush after iteration over dataloader)')
def parse_multiprocessing_cli(parser):
    """Register multiprocessing-mode flags on *parser*."""
    add = parser.add_argument
    add('--nprocs', type=int, default=4,
        help='Tells us how much processes do we want')
    add('--master_port', type=int, default=29500)
    add('--verbose_comm', action='store_true')
    add('--verbose_comm_from_cmd', action='store_true')
def parse_cli():
    """Build and parse the full CLI for running a pipeline partition.

    Combines the distributed and multiprocessing sub-parsers with model,
    data and training options.  The ``*_from_cmd`` flags mark which values
    should override the JSON config later.  Returns the parsed Namespace
    (reads sys.argv).
    """
    parser = argparse.ArgumentParser(description='PyTorch partition as part of Async Pipeline')
    parser.add_argument('--mode', choices=['dist', 'mp', 'preproc', 'eval'], default='dist', help='Running mode')
    parse_distributed_cli(parser)
    parse_multiprocessing_cli(parser)
    parser.add_argument('--model', type=str, required=False)
    parser.add_argument('--model_from_cmd', action='store_true')
    parser.add_argument('--debug', nargs='*', type=int, default=False, help='Will wait for debugger attachment on given ranks.')
    parser.add_argument('--config', help='Config File', default='configs/dummy.json')
    parser.add_argument('--bs_train', type=int, help='Train batch size', default=128, metavar='B')
    parser.add_argument('--bs_train_from_cmd', action='store_true')
    parser.add_argument('--bs_test', type=int, help='Test batch size', default=200, metavar='BT')
    add_dataset_argument(parser)
    parser.add_argument('--seed', '-s', type=int, help='Random seed', default=42)
    parser.add_argument('--logdir', type=str, default='./logs', help='where logs and events go')
    parser.add_argument('--out_dir', '-o', type=str, help='Output folder for results', default='./results')
    parser.add_argument('--out_dir_from_cmd', action='store_true')
    parser.add_argument('--data_dir', type=str, help='Data directory', required=False)
    parser.add_argument('--out_filename', '-n', type=str, default='out', help='Name of output file')
    parser.add_argument('--cpu', action='store_true', default=False, help='run partition on cpu')
    parser.add_argument('--num_data_workers', type=int, help='Number of workers to use for dataloading', default=0)
    parser.add_argument('--epochs', type=int, help='Training epochs to run', default=(- 1))
    parser.add_argument('--epochs_from_cmd', action='store_true')
    parser.add_argument('--steps', type=int, help='Training steps to run', default=(- 1))
    parser.add_argument('--steps_from_cmd', action='store_true')
    parser.add_argument('--step_every', type=int, help='Aggregation steps', default=1)
    parser.add_argument('--step_every_from_cmd', action='store_true')
    parser.add_argument('--num_chunks', help='Number of chunks for Double Buffering', type=int, default=1)
    parser.add_argument('--weight_stashing', action='store_true', default=False, help='Do weight Stashing')
    parser.add_argument('--log_frequency', type=int, default=100, help='Print extra statistics every given number of batches')
    parser.add_argument('--data_propagator', default='auto', help='Data propagation inside the pipeline')
    parser.add_argument('--no_recomputation', action='store_true', default=False, help='Will not use recomputation (trading speed for memory).')
    parser.add_argument('--base_config_path', nargs='*', type=str, default=[], help='config pathes to override. Must follow the same relativity rule')
    parser.add_argument('--explicit_eval_cp', required=False, type=str, help='explicit name for eval cp')
    parser.add_argument('--eval_device', required=False, type=str, default='cuda:0', help='device to eval on first input')
    parser.add_argument('--single_worker_eval_batch_size', required=False, type=int, default=32, help='batch size used at T5 generation')
    args = parser.parse_args()
    # Mark base_config_path as explicitly given so it survives config merging.
    if args.base_config_path:
        args.base_config_path_from_cmd = True
    return args
def maybe_parse_mpi_env_vars(args):
    '\n    Parses env vars (e.g from mpirun) and push them into args (overriding).\n    This allows completing some "incomplete" cli-argument parsing.\n\n    Requires:\n        args = parse_cli()\n\n    References:\n        https://www.open-mpi.org/faq/?category=running#mpi-environmental-variables\n    '
    if (args.distributed_backend != 'mpi'):
        return
    env = os.environ
    args.rank = int(env['OMPI_COMM_WORLD_RANK'])
    args.local_rank = int(env['OMPI_COMM_WORLD_LOCAL_RANK'])
    args.world_size = int(env['OMPI_COMM_WORLD_SIZE'])
def save_distributed_experiment(statistics, args, world_size, rank, local_rank, stage):
    """Merge and save experiment results across all ranks, one rank at a time.

    The last rank writes the initial result file; every other rank then, in
    reverse rank order (serialized by barriers), loads the file and adds any
    statistics keys of its own that are still missing, then saves it back.
    """
    def careful_del(x, n):
        # Remove key n from dict x if present (missing keys are fine).
        if (n in x):
            del x[n]
    # Per-rank fields that should not be stored as experiment config.
    un_needed_args = ['stage', 'rank', 'local_rank']
    if (rank == (world_size - 1)):
        if statistics:
            fit_res = statistics.get_stats(stage)
            config = vars(args)
            for name in un_needed_args:
                careful_del(config, name)
            save_experiment(args.out_filename, args.out_dir, config, fit_res)
    torch.distributed.barrier()
    for current_rank in reversed(range((world_size - 1))):
        if (rank == current_rank):
            if statistics:
                my_fit_res = statistics.get_stats(stage)
                (config, fit_res) = load_experiment_for_update(args.out_filename, args.out_dir)
                # Only add keys this rank has that are not in the file yet.
                for (k, v) in my_fit_res.items():
                    if (k not in fit_res):
                        fit_res[k] = v
                save_experiment(args.out_filename, args.out_dir, config, fit_res)
        # Barrier per iteration keeps file access serialized across ranks.
        torch.distributed.barrier()
    print(f'rank{rank}: save_distributed_experiment - Done')
def mp_queue_matrix(args, start_method='spawn'):
    """Create the rank-to-rank SimpleQueue matrix shared among worker processes.

    Returns {rank: {peer_rank: SimpleQueue}} with a queue for every
    send/recv relation derived from the model's pipeline configuration.
    """
    mmp = mp.get_context(start_method)
    world_size = args.world_size
    cfg = args.model
    prefer_seq_sends = True
    handler = AVAILABLE_MODELS.get(cfg)
    if (handler is None):
        raise ValueError(f'Model {cfg} not found. AVAILABLE_MODELS={AVAILABLE_MODELS.keys()}')
    pipe_config = handler.get_pipe_config()
    stage_to_rank_map = pipe_config.get_stage_to_ranks_map()
    queues = {i: dict() for i in range(world_size)}
    for rank in range(world_size):
        stage_id = pipe_config.rank_to_stage_idx(rank)
        (send_ranks_i, receive_ranks_i) = get_my_send_recv_ranks(pipe_config, stage_id, stage_to_rank_map=stage_to_rank_map, prefer_seq_sends=prefer_seq_sends)
        # One queue per peer, covering both send and recv relations.
        for r in itertools.chain.from_iterable(send_ranks_i.values()):
            queues[rank][r] = mmp.SimpleQueue()
        for r in itertools.chain.from_iterable(receive_ranks_i.values()):
            queues[rank][r] = mmp.SimpleQueue()
    return queues
def multiprocessing_worker(rank, args, share):
    """Worker body for each spawned process: set env vars, init gloo, run main().

    *share* is the (rcv_queues, buffer_reuse_queues) pair created by the parent
    (see start_mutiprocessing).
    """
    # NOTE(review): forces 'fork' inside an already-spawned child — presumably
    # so any further sub-processes fork; confirm this is intentional.
    mp.set_start_method('fork', force=True)
    local_rank = rank
    args.rank = rank
    args.local_rank = local_rank
    args.is_multiprocessing_worker = True
    backend = 'gloo'
    # Env-var rendezvous on localhost for torch.distributed.
    current_env = os.environ
    current_env['MASTER_ADDR'] = '127.0.0.1'
    current_env['MASTER_PORT'] = str(args.master_port)
    current_env['WORLD_SIZE'] = str(args.world_size)
    current_env['RANK'] = str(rank)
    current_env['LOCAL_RANK'] = str(local_rank)
    torch.distributed.init_process_group(backend, init_method='env://', rank=rank, world_size=args.world_size)
    main(args, share)
def start_distributed(python_args_dict=None):
    """Entry point for distributed (e.g. mpirun-launched) execution of main()."""
    args = get_basic_args(python_args_dict)
    # MPI env vars (if any) override rank/local_rank/world_size from the CLI.
    maybe_parse_mpi_env_vars(args)
    args.world_size = get_world_size(args.distributed_backend)
    args.is_multiprocessing_worker = False
    main(args)
def main(args, shared_ctx=None):
    """Run one pipeline worker: seeding, cudnn flags, training loop, result save.

    *shared_ctx* carries the multiprocessing queue pair when launched via
    start_mutiprocessing; it is None in distributed mode.
    """
    # Optionally block until a ptvsd debugger attaches (ranks listed in
    # --debug; -1 means all ranks).
    if (args.debug and ((args.rank in args.debug) or ((- 1) in args.debug))):
        import ptvsd
        port = (3000 + args.local_rank)
        args.num_data_workers = 0  # easier debugging without worker processes
        address = ('127.0.0.1', port)
        print(f'-I- rank {args.rank} waiting for attachment on {address}')
        ptvsd.enable_attach(address=address)
        ptvsd.wait_for_attach()
    else:
        delattr(args, 'debug')
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if getattr(args, 'cudnn_benchmark', True):
        torch.backends.cudnn.benchmark = True
    if getattr(args, 'cudnn_deterministic', False):
        torch.backends.cudnn.deterministic = True
    if getattr(args, 'deterministic_mode', False):
        # Full determinism: overrides benchmark and enables deterministic algos
        # where the installed torch version supports it.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        try:
            torch.use_deterministic_algorithms(True)
        except:
            pass
    (logger, train_dl, test_dl, is_first_partition, is_last_partition, partition, statistics, train_dl_len, test_dl_len, samplers) = prepare_pipeline(args, shared_ctx=shared_ctx)
    exp_start_time = time.time()
    times_res = training_loop(args, logger, train_dl, test_dl, is_last_partition, partition, statistics, train_dl_len, test_dl_len, samplers)
    exp_total_time = (time.time() - exp_start_time)
    # Attach timing results to args so they are stored with the experiment config.
    args.total_epoch_times = times_res[0]
    args.train_epochs_times = times_res[1]
    args.exp_total_time = exp_total_time
    save_distributed_experiment(statistics, args, args.world_size, args.rank, args.local_rank, args.stage)
def start_mutiprocessing(python_args_dict=None):
    """Spawn one worker process per rank and run the pipeline via multiprocessing.

    NOTE(review): the name keeps the historical 'mutiprocessing' typo because
    callers elsewhere (see start()) reference it by this name.
    """
    args = get_basic_args(python_args_dict)
    args.world_size = args.nprocs
    start_method = 'spawn'
    # Pre-create the inter-rank queues in the parent so all children share them.
    rcv_queues = mp_queue_matrix(args, start_method=start_method)
    buffer_reuse_queues = mp_queue_matrix(args, start_method=start_method)
    share = (rcv_queues, buffer_reuse_queues)
    mp.start_processes(multiprocessing_worker, args=(args, share), nprocs=args.nprocs, join=True, daemon=False, start_method=start_method)
def start_preproc(python_args_dict=None):
    """Preprocess the dataset once per rank, reusing a shared cache across ranks."""
    args = get_basic_args(python_args_dict)
    args.world_size = args.nprocs
    cache = None
    for rank in range(args.world_size):
        print(f'-I- preprocessing data for rank {rank}/{(args.world_size - 1)} (word size is {args.world_size})...')
        local_rank = rank
        args.rank = rank
        args.local_rank = local_rank
        args.is_multiprocessing_worker = False
        # cache is threaded through so later ranks reuse earlier ranks' work.
        cache = preproc_data(args, cache, save_cache=True)
def start_eval_checkpoint(python_args_dict=None):
    """Evaluate saved checkpoints and dump all results to a text file.

    Pretty-prints the results to stdout, captures the same pretty-print into
    a string, and writes it to results/all_results_{out_filename}.txt.
    """
    args = get_basic_args(python_args_dict)
    all_results = get_all_eval_results(args)
    pprint(all_results)
    # Capture the pretty-printed results as a string for the output file.
    with io.StringIO() as buf, redirect_stdout(buf):
        pprint(all_results)
        s = buf.getvalue()
    auto_file_name(args)
    fn = f'results/all_results_{args.out_filename}.txt'
    with open(fn, 'w+') as f:
        f.write(s)
    # BUGFIX: the original used a plain string literal (missing f-prefix), so
    # '{fn}' was printed verbatim; also dropped the duplicated word 'all'.
    print(f'-I- saved all results in {fn}')
    print('-I- Done')
def get_basic_args(python_args_dict=None):
    """Build the args namespace: CLI first, then layer either the given python
    dict or the JSON config file on top of it."""
    args = parse_cli()
    if not python_args_dict:
        parse_json_config(args, args.config, first=True)
    else:
        add_parsed_config_to_args(args, python_args_dict)
    return args
def start(python_args_dict=None):
    """Top-level dispatcher: route to mp / preproc / eval / distributed by --mode.

    NOTE(review): parse_cli() is called here essentially to read --mode; the
    chosen sub-starter parses the CLI again via get_basic_args().
    """
    print(f'Using {torch.get_num_threads()} Threads')
    args = parse_cli()
    if (args.mode == 'mp'):
        print('Running in multiprocessing mode')
        start_mutiprocessing(python_args_dict=python_args_dict)
    elif (args.mode == 'preproc'):
        print('Running in preproc mode: Preprocessing data...')
        start_preproc(python_args_dict=python_args_dict)
    elif (args.mode == 'eval'):
        print('Running in eval mode: Evaluating checkpoints...')
        start_eval_checkpoint(python_args_dict=python_args_dict)
    else:
        print('Running in distributed mode')
        start_distributed(python_args_dict=python_args_dict)
def get_config():
    """Build the run configuration for T5-3B on SuperGLUE BoolQ (mpipe, MPI backend).

    Returns a plain dict (via ConfigDict.to_dict()) consumed by the pipeline runner.
    """
    config = ConfigDict()
    # --- paths / naming ---
    config.logdir = 'logs/t5/mpipe/'
    config.data_dir = '/home_local/saareliad/data'
    config.out_dir = 'results/t5/super_glue/boolq'
    config.auto_file_name = True
    config.out_filename = 'test_vs'
    # --- distributed / pipeline layout ---
    config.distributed_backend = 'mpi'
    config.model = 't5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe'
    # Maps each pipeline stage to a physical GPU; devices are reused across stages.
    config.stage_to_device_map = [0, 1, 2, 3, 4, 5, 6, 7, 6, 1, 4, 5, 3, 2, 0]
    config.nprocs = 15
    # --- data ---
    config.dataset = 't5_tfds'
    config.mixture_or_task_name = 'super_glue_boolq_v102'
    config.preproc_batch_size = 128
    # --- trainer / statistics ---
    config.trainer = ConfigDict()
    config.trainer.type = 't5'
    config.trainer.args = ConfigDict()
    config.trainer.args.loss_multiplier = 2.59
    config.statistics = 'squad_loss_per_batch'
    # --- batching / sequence lengths ---
    config.step_every = 5
    config.bs_train = 4
    config.bs_test = 4
    config.max_seq_length = 512
    config.answer_max_seq_length = 4
    config.num_data_workers = 5
    # --- optimizer / LR schedule ---
    config.optimizer = ConfigDict()
    config.optimizer.type = 'adafactor'
    config.optimizer.args = ConfigDict()
    config.optimizer.args.lr = 0.001
    config.optimizer.args.weight_decay = 0
    config.optimizer.args.scale_parameter = True
    config.optimizer.args.relative_step = False
    config.lr_scheduler = ConfigDict()
    config.lr_scheduler.type = 'get_constant_schedule_with_warmup'
    config.lr_scheduler.preproc_args = ConfigDict()
    config.lr_scheduler.args = ConfigDict()
    config.lr_scheduler.args.num_warmup_steps = 200
    config.lr_scheduler.args.last_epoch = (- 1)
    # --- run length (steps-based; epochs disabled) ---
    config.epochs = (- 1)
    config.steps = 3200
    # --- misc flags ---
    config.seed_from_cmd = False
    config.seed = 42
    config.bs_train_from_cmd = False
    config.bs_test_from_cmd = False
    config.num_chunks = 1
    config.verbose_comm = False
    config.flush_rate = (- 1)
    config.cudnn_benchmark = True
    config.max_buffers = 1
    config.keep_buffers_alive = False
    config.train_batches_limit = (- 1)
    config.test_batches_limit = 0
    config.log_frequency = 200
    # --- model / tokenizer ---
    config.model_name_or_path = 't5-3b'
    config.do_lower_case = True
    config.overwrite_cache = False
    config.dont_drop_last = True
    config.model_type = 't5'
    config.precomputed_masks = True
    # --- checkpointing / scheduling ---
    config.save_checkpoints = True
    config.load_model_one_by_one = False
    config.weight_stashing = False
    config.work_scheduler = '1f1b'
    config.checkpoints_save_name_prefix = 'stale_adafactor'
    config.checkpoints_save_dir = '/nfs_Disk2/mpipe/checkpoints/t5/3b/boolq/stale/'
    config = config.to_dict()
    return config
class FileLogger():
    """Per-rank logger writing separate info/warn/debug files plus console output."""

    def __init__(self, output_dir: str, global_rank: int, local_rank: int, name: str, world_size: int, name_prefix=''):
        self.output_dir = output_dir
        if (not os.path.exists(self.output_dir)):
            os.makedirs(self.output_dir, exist_ok=True)
        self.logger = FileLogger.get_logger(output_dir, global_rank=global_rank, local_rank=local_rank, name=name, world_size=world_size, name_prefix=name_prefix)

    def exception(self, *args_, **kwargs):
        """Delegate to logging.Logger.exception (logs an ERROR with traceback)."""
        return self.logger.exception(*args_, **kwargs)

    @staticmethod
    def get_logger(output_dir: str, global_rank: int, local_rank: int, name: str, world_size: int, name_prefix=''):
        """Build a DEBUG-level logger with one file handler per severity.

        Files are named '{name_prefix}-{info|warn|debug}-{global_rank}.log'
        inside *output_dir*; a console handler mirrors everything at DEBUG.
        """
        logger_ = logging.getLogger(name)
        logger_.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(message)s')

        def get_name(u):
            # Per-severity log file path for this rank.
            curr_name = f'{name_prefix}-{u}-{global_rank}.log'
            curr_name = os.path.join(output_dir, curr_name)
            return curr_name
        vlog = logging.FileHandler(get_name('info'))
        vlog.setLevel(logging.INFO)
        vlog.setFormatter(formatter)
        logger_.addHandler(vlog)
        eventlog = logging.FileHandler(get_name('warn'))
        eventlog.setLevel(logging.WARN)
        eventlog.setFormatter(formatter)
        logger_.addHandler(eventlog)
        # Debug file gets timestamps and source locations, unlike the others.
        time_formatter = logging.Formatter('%(asctime)s - %(filename)s:%(lineno)d - %(message)s')
        debuglog = logging.FileHandler(get_name('debug'))
        debuglog.setLevel(logging.DEBUG)
        debuglog.setFormatter(time_formatter)
        logger_.addHandler(debuglog)
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        console.setLevel(logging.DEBUG)
        logger_.addHandler(console)
        return logger_

    def debug(self, *args_):
        self.logger.debug(*args_)

    def warning(self, *args_):
        self.logger.warning(*args_)

    def info(self, *args_):
        self.logger.info(*args_)
def MPI_Init():
    """Call the C-level MPI_Init via ctypes on the module-level `mpi` library handle.

    Passes empty argc/argv references (MPI allows NULL-equivalent arguments).
    """
    print('-I- calling MPI_Init')
    argc = c_int()
    argv = POINTER(c_char_p)()
    mpi.MPI_Init(byref(argc), byref(argv))
def mpi_finalize():
    """Call the C-level MPI_Finalize on the module-level `mpi` library handle."""
    print('-I- Calling MPI_Finalize')
    mpi.MPI_Finalize()
def process_begin_mpi():
    """Initialize MPI for this process and register finalization to run at exit."""
    MPI_Init()
    atexit.register(mpi_finalize)
def worker_function(local_rank, world_size):
    """Fake an Open MPI launch environment for this process and init the mpi backend.

    Sets the OMPI_* env vars a real mpirun launch would provide, then calls
    torch.distributed.init_process_group(backend='mpi') with an env-var
    rendezvous on localhost.  Prints the resulting world size.
    """
    print('-I- my local_rank is', local_rank)
    import os
    os.environ['OMPI_COMM_WORLD_SIZE'] = str(world_size)
    os.environ['OMPI_COMM_WORLD_RANK'] = str(local_rank)
    os.environ['OMPI_COMM_WORLD_LOCAL_RANK'] = str(local_rank)
    os.environ['OMPI_UNIVERSE_SIZE'] = str(world_size)
    os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'] = str(world_size)
    os.environ['OMPI_COMM_WORLD_NODE_RANK'] = str(1)
    import torch.distributed as dist
    current_env = os.environ
    current_env['MASTER_ADDR'] = '127.0.0.1'
    current_env['MASTER_PORT'] = str(29500)
    current_env['WORLD_SIZE'] = str(world_size)
    current_env['RANK'] = str(local_rank)
    dist.init_process_group(backend='mpi', world_size=world_size)
    print(dist.get_world_size())
def wait(handlers):
    """Block until every async communication handler in *handlers* completes."""
    for handler in handlers:
        handler.wait()
def parse_cli():
    """Parse CLI flags for the point-to-point communication test script."""
    parser = argparse.ArgumentParser(description='tst')
    add = parser.add_argument
    add('--master_port', type=int, default=29500)
    add('--rank', default=None, type=int, help='Rank of worker')
    add('--local_rank', default=0, type=int, help='Local rank of worker')
    add('--distributed_backend', choices=['gloo', 'nccl', 'mpi'], default='gloo', type=str,
        help='distributed backend to use')
    add('--world_size', default=2, type=int, help='World size')
    return parser.parse_args()
def gloo_cuda_test():
    """Point-to-point isend/irecv smoke test between two ranks on CUDA tensors.

    Rank 0 sends NUM_ISEND ones-tensors to rank 1 (which receives into zeros);
    both then assert every tensor equals ones.  Run one process per rank.
    """
    BACKAND = 'gloo'  # NOTE(review): presumably a typo for 'BACKEND'
    NUM_ISEND = 3
    shape = (512, 32, 32, 64)
    args = parse_cli()
    local_rank = args.local_rank
    rank = args.local_rank
    print(local_rank)
    backend = 'gloo'  # NOTE(review): unused; BACKAND is what init_process_group gets
    # Env-var rendezvous on localhost.
    current_env = os.environ
    current_env['MASTER_ADDR'] = '127.0.1.1'
    current_env['MASTER_PORT'] = str(args.master_port)
    current_env['WORLD_SIZE'] = str(args.world_size)
    current_env['RANK'] = str(rank)
    current_env['LOCAL_RANK'] = str(local_rank)
    dist.init_process_group(BACKAND, init_method='env://', rank=rank, world_size=2)
    handlers = []
    if (dist.get_rank() == 0):
        device = torch.device('cuda:0')
        if (BACKAND == 'mpi'):
            torch.cuda.set_device(device)
        tensors = [torch.ones(*shape, device=device) for _ in range(NUM_ISEND)]
        # Tags start at 1 so each send/recv pair is matched explicitly.
        handlers = [dist.isend(tensors[i], 1, tag=(i + 1)) for i in range(NUM_ISEND)]
    else:
        device = torch.device('cuda:1')
        if (BACKAND == 'mpi'):
            torch.cuda.set_device(device)
        tensors = [torch.zeros(*shape, device=device) for _ in range(NUM_ISEND)]
        handlers = [dist.irecv(tensors[i], 0, tag=(i + 1)) for i in range(NUM_ISEND)]
    wait(handlers)
    for i in range(NUM_ISEND):
        assert torch.all((tensors[i] == torch.ones(*shape, device=device)))
    print(f'Done {dist.get_rank()}')
def test_general():
    """Generic isend/irecv test; devices depend on backend (cuda for mpi, cpu otherwise).

    NOTE(review): BACKAND, NUM_ISEND and shape are referenced as globals here
    but are only defined locally inside gloo_cuda_test in this file chunk —
    this will NameError unless they also exist at module level; verify.
    """
    dist.init_process_group(BACKAND, init_method='env://', world_size=2)
    handlers = []
    if (dist.get_rank() == 0):
        device = torch.device(('cuda:0' if (BACKAND == 'mpi') else 'cpu'))
        if (BACKAND == 'mpi'):
            torch.cuda.set_device(device)
        tensors = [torch.ones(*shape, device=device) for _ in range(NUM_ISEND)]
        handlers = [dist.isend(tensors[i], 1, tag=(i + 1)) for i in range(NUM_ISEND)]
    else:
        device = torch.device(('cuda:1' if (BACKAND == 'mpi') else 'cpu'))
        if (BACKAND == 'mpi'):
            torch.cuda.set_device(device)
        tensors = [torch.zeros(*shape, device=device) for _ in range(NUM_ISEND)]
        handlers = [dist.irecv(tensors[i], 0, tag=(i + 1)) for i in range(NUM_ISEND)]
    wait(handlers)
    for i in range(NUM_ISEND):
        assert torch.all((tensors[i] == torch.ones(*shape, device=device)))
    print(f'Done {dist.get_rank()}')
def gpt2_tied():
    """Launch the tied-GPT2 WikiText-2 grid (stale alg, one seed), 4 GPUs per run."""
    launch_cmd = 'mpirun -np 5 python main.py'
    cfg_dir = 'configs/lm/wt2/gpt2/tied/'
    algs = ['stale']
    grid = {
        'config': [f'{cfg_dir}{alg}.json' for alg in algs],
        'seed': [1322019],
    }
    run_grid_on_multi_gpu_per_run(launch_cmd, grid, gpu_list=list(range(8)), gpus_per_config=4)
def gpt2xl():
    """Queue the GPT2-XL (untied) WikiText-2 grid: 4 algorithms x 5 seeds, 8 GPUs per run."""
    launch_cmd = 'python main.py --mode mp --nprocs 8 --step_every 8 --step_every_from_cmd'
    cfg_dir = 'configs/lm/wt2/gpt2xl/untied/'
    algs = ['aggmsnag', 'stale', 'seq', 'gpipe']
    grid = {
        'config': [f'{cfg_dir}{alg}.json' for alg in algs],
        'seed': [42, 20202020, 77777777, 314159, 1322019],
    }
    helper = RunGridHelper(verbose=True, test=False, gpu_list=list(range(8)))
    helper.add_runs(launch_cmd, grid, num_gpus=8)
    helper.run()
def grad_accumulation_WRN():
    """Grid for WRN-28x10 (CIFAR-100, GroupNorm) gradient-accumulation experiments.

    Queues three linear-scaling configs (step_every x bs_train with constant
    effective batch 256) for two algorithms and two seeds, 4 GPUs per run.
    """
    def mp_cv_grad_accumulation(helper, alg='stale_nr', model='wrn_28x10_c100_dr03_p4_group_norm', port=29500, seed=42):
        # Queue one alg/seed combination; each of the 3 grids gets its own master port.
        COMMAND = 'python main.py --mode mp'
        cv_cfgs_dir = 'configs/cv/cifar100/wrn28x10/no_recomputation/'
        gpus_per_config = 4
        cfgs_dir = cv_cfgs_dir
        all_algs = [alg]
        common = {'config': [f'{cfgs_dir}{cfg}.json' for cfg in all_algs], 'seed': [seed], 'nprocs': [gpus_per_config], 'step_every_from_cmd': [''], 'bs_train_from_cmd': [''], 'out_dir_from_cmd': [''], 'out_dir': ['results/debug_se/gn/linscale/'], 'model': [model], 'model_from_cmd': ['']}
        # Constant effective batch size: step_every * bs_train == 256.
        param_grid_1 = {'step_every': [1], 'bs_train': [256]}
        param_grid_2 = {'step_every': [2], 'bs_train': [128]}
        param_grid_3 = {'step_every': [4], 'bs_train': [64]}
        param_grid = [param_grid_1, param_grid_2, param_grid_3]
        for (i, p) in enumerate(param_grid):
            p.update(**common)
            p['master_port'] = [(port + i)]  # distinct rendezvous port per grid
        helper.add_runs(COMMAND, param_grid, num_gpus=gpus_per_config)
    helper = RunGridHelper(verbose=True, test=False, gpu_list=list(range(8)))
    mp_cv_grad_accumulation(helper, 'stale_nr', port=29500, seed=42)
    mp_cv_grad_accumulation(helper, 'msnag_nr', port=29600, seed=42)
    mp_cv_grad_accumulation(helper, 'stale_nr', port=29700, seed=1322019)
    mp_cv_grad_accumulation(helper, 'msnag_nr', port=29800, seed=1322019)
    helper.run()
def t5_glue():
    """Placeholder for T5 GLUE grid experiments.

    NOTE(review): ALL_TASKS is empty and mp_helper (a copy of the CV
    grad-accumulation helper) is defined but never invoked — this looks
    like work in progress.
    """
    ALL_TASKS = {}
    def mp_helper(helper, alg='stale_nr', model='wrn_28x10_c100_dr03_p4_group_norm', port=29500, seed=42):
        # Queue one alg/seed combination (unused; see NOTE above).
        COMMAND = 'python main.py --mode mp'
        cv_cfgs_dir = 'configs/cv/cifar100/wrn28x10/no_recomputation/'
        gpus_per_config = 4
        cfgs_dir = cv_cfgs_dir
        all_algs = [alg]
        common = {'config': [f'{cfgs_dir}{cfg}.json' for cfg in all_algs], 'seed': [seed], 'nprocs': [gpus_per_config], 'step_every_from_cmd': [''], 'bs_train_from_cmd': [''], 'out_dir_from_cmd': [''], 'out_dir': ['results/debug_se/gn/linscale/'], 'model': [model], 'model_from_cmd': ['']}
        param_grid_1 = {'step_every': [1], 'bs_train': [256]}
        param_grid_2 = {'step_every': [2], 'bs_train': [128]}
        param_grid_3 = {'step_every': [4], 'bs_train': [64]}
        param_grid = [param_grid_1, param_grid_2, param_grid_3]
        for (i, p) in enumerate(param_grid):
            p.update(**common)
            p['master_port'] = [(port + i)]
        helper.add_runs(COMMAND, param_grid, num_gpus=gpus_per_config)
def parse_cli():
    """Parse the experiment-selection CLI for replicating experiment grids.

    Choices come from the module-level AVAIALBE_EXPS registry
    (NOTE(review): name looks like a typo of AVAILABLE_EXPS, defined elsewhere).
    """
    parser = argparse.ArgumentParser('replicate experiments grid')
    parser.add_argument('-e', '--exp', choices=AVAIALBE_EXPS.keys(), default='grad_accumulation_WRN')
    args = parser.parse_args()
    return args
def get_input_args_kwargs(sample) -> Tuple[Tuple, Dict]:
    """Normalize a model input *sample* into an (args, kwargs) pair.

    - dict  -> ((), sample)     : treated as keyword arguments
    - tuple -> (sample, {})     : treated as positional arguments
    - other -> ((sample,), {})  : a single positional argument

    Returns:
        (args, kwargs) suitable for calling ``model(*args, **kwargs)``.

    Cleanups vs. the original: the malformed annotation ``Tuple[(Tuple, Dict)]``
    is written in the standard form, and the if/elif/else with a trailing
    shared return is replaced by early returns.
    """
    if isinstance(sample, dict):
        return (tuple(), sample)
    if isinstance(sample, tuple):
        return (sample, dict())
    return ((sample,), dict())
def run_sanity_check(cmd_args: Namespace, partitioner: PartitioningTask, analysis_config: AnalysisPipelineConfig, device='cpu', training=False, check_grads=True, ref_model=None, check_init=False):
    """Compare a partitioned pipeline against the unpartitioned reference model.

    Runs the same (seeded) forward pass through the partitioned stages and the
    reference model and compares outputs; optionally also compares per-parameter
    gradients after a backward pass, and optionally checks that model
    initialization is deterministic.

    Args:
        cmd_args: parsed CLI namespace (must provide n_partitions when check_grads).
        partitioner: task object supplying get_input() and get_model().
        analysis_config: holds the partitioned stage modules (stage_to_model).
        device: 'cpu' or a CUDA device for the partitioned forward.
        training: run models in train() mode when True, eval() mode otherwise.
        check_grads: also backprop both models and diff gradients.
        ref_model: optional pre-built reference model (rebuilt here if None).
        check_init: verify two seeded model constructions give identical outputs.

    Returns:
        True when outputs (and, if requested, gradients) match; False otherwise.
    """
    # Force deterministic kernels so the two forward passes are comparable.
    try:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        try:
            # Not available in older torch versions; best-effort.
            torch.use_deterministic_algorithms(True)
        except:
            pass
    except:
        pass
    is_ok = True
    (args, kwargs) = get_input_args_kwargs(partitioner.get_input(cmd_args, analysis=False))
    assert (len(args) == 0), 'only kwargs are supported for sanity checks'
    if check_init:
        # Build the model twice under the same seed; outputs must match exactly.
        torch.cuda.synchronize()
        torch.manual_seed(0)
        model = partitioner.get_model(cmd_args)
        model.train(training)
        a1 = model(*args, **kwargs)
        del model
        torch.cuda.synchronize()
        torch.manual_seed(0)
        model = partitioner.get_model(cmd_args)
        model.train(training)
        a2 = model(*args, **kwargs)
        torch.cuda.synchronize()
        assert torch.allclose(a1, a2), ('intialization check failed' + str((a1, a2)))
        del a1, a2, model
    # Partitioned forward (seeded), keeping activations for a later backward.
    torch.manual_seed(0)
    torch.cuda.synchronize()
    (output, (activations, req_grad)) = run_partitions_fwd(kwargs, analysis_config, device=device, return_info_for_bwd=True)
    torch.cuda.synchronize()
    assert ((len(output) == 1) and isinstance(output[0], torch.Tensor))
    output = output[0]
    if (device == 'cpu'):
        assert (not output.is_cuda)
    else:
        assert output.is_cuda
    # Reference forward under the same seed.
    torch.manual_seed(0)
    torch.cuda.synchronize()
    if (ref_model is None):
        ref_model = partitioner.get_model(cmd_args)
    ref_model.to(device).train(training)
    torch.cuda.synchronize()
    with torch.no_grad():
        # Inputs are moved without tracking grad; the forward itself stays
        # outside no_grad so the reference backward below is possible.
        kwargs_to_ref_model = move_tensors(kwargs, device)
        args_to_ref_model = move_tensors(args, device)
        torch.cuda.synchronize()
    torch.manual_seed(0)
    ref_output = ref_model(*args_to_ref_model, **kwargs_to_ref_model)
    torch.cuda.synchronize()
    del kwargs_to_ref_model, args_to_ref_model
    ref_model = ref_model.cpu()
    assert isinstance(ref_output, torch.Tensor)
    if (device == 'cpu'):
        assert (not ref_output.is_cuda)
    else:
        assert ref_output.is_cuda
    assert (ref_output.device == output.device)
    if torch.allclose(output, ref_output):
        print(f'''
outputs are the same in {('training' if training else 'evaluation')}
''')
        print(output, ref_output)
    else:
        print(f'''
outputs are not the same in {('training' if training else 'evaluation')}
''')
        print(output, ref_output)
        is_ok = False
        # Dump both autograd graphs for manual inspection of the divergence.
        g1 = make_dot(output)
        g2 = make_dot(ref_output)
        g1.save('p_output')
        g2.save('ref_output')
        print('saved dot files: p_output ref_output')
    if check_grads:
        # Backward through the reference model; collect grads on CPU, noting
        # parameters that share a gradient tensor (tied weights).
        ref_output.backward()
        torch.cuda.synchronize()
        del ref_output
        ref_grads = dict()
        shared = dict()
        for (name, p) in ref_model.named_parameters():
            ref_grads[name] = p.grad = p.grad.cpu()
            if (p.grad in shared):
                print(f'{name} is {shared[p.grad]}')
            shared[p.grad] = name
        torch.cuda.synchronize()
        print()
        # Backward through the partitioned pipeline and collect grads the same way.
        output.backward()
        torch.cuda.synchronize()
        del output
        partitioned_grads = dict()
        shared = dict()
        for idx in range(cmd_args.n_partitions):
            for (name, p) in analysis_config.stage_to_model[idx].named_parameters():
                partitioned_grads[name] = p.grad = p.grad.cpu()
                if (p.grad in shared):
                    print(f'{name} is {shared[p.grad]}')
                shared[p.grad] = name
        torch.cuda.synchronize()
        # Compare gradients parameter by parameter.
        for (name, g) in partitioned_grads.items():
            assert isinstance(g, torch.Tensor)
            if (not (name in ref_grads)):
                # Only a tied lm_head is allowed to be missing; check it against
                # the shared embedding's gradient instead.
                msg = f'{name} is missing in ref_grads'
                assert (name == 'lm_head.weight')
                is_same = torch.allclose(ref_grads['shared_embed_weight'], partitioned_grads['lm_head.weight'])
                msg += (' but grad is the same' if is_same else ' and grad is different')
                print(msg)
            elif (not torch.allclose(g, ref_grads[name])):
                abs_error = torch.abs((g - ref_grads[name]))
                max_abs = abs_error.max()
                abs_error = (abs_error.sum() / abs_error.numel())
                print(f'{name} grad is different avg_abs {abs_error} N {g.numel()} max_abs {max_abs}')
                is_ok = False
            else:
                pass
    return is_ok
def make_dot(var):
    """Render the autograd graph reachable from *var* as a graphviz Digraph.

    Tensors are drawn as light-blue boxes labelled with their shape; grad_fn
    nodes are labelled with their class name. Edges point from producers to
    consumers, following each node's next_functions.
    """
    node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
    seen = set()

    def visit(node):
        # Skip nodes already rendered (the graph is a DAG with sharing).
        if node in seen:
            return
        if isinstance(node, torch.Tensor):
            label = (('(' + ', '.join([('%d' % v) for v in node.size()])) + ')')
            dot.node(str(id(node)), str(label), fillcolor='lightblue')
        else:
            dot.node(str(id(node)), str(type(node).__name__))
        seen.add(node)
        if hasattr(node, 'next_functions'):
            for u in node.next_functions:
                dot.edge(str(id(u[0])), str(id(node)))
                visit(u[0])

    visit(var.grad_fn)
    return dot
def run(rank, size, hostname):
    """Point-to-point smoke test: rank 0 sends a one-element tensor to rank 1."""
    print(f'I am {rank} of {size} in {hostname}')
    payload = torch.zeros(1)
    if rank != 0:
        dist.recv(tensor=payload, src=0)
    else:
        payload += 1
        dist.send(tensor=payload, dst=1)
    print('Rank ', rank, ' has data ', payload[0])
def init_processes(rank, size, hostname, fn, backend='tcp'):
    """Initialize the distributed environment, then run fn(rank, size, hostname)."""
    dist.init_process_group(backend, world_size=size, rank=rank)
    fn(rank, size, hostname)
def run(rank, size, hostname):
    """GPU point-to-point smoke test: rank 0 sends a one-element CUDA tensor to rank 1."""
    print(f'I am {rank} of {size} in {hostname}')
    payload = torch.zeros(1).cuda()
    if rank != 0:
        dist.recv(tensor=payload, src=0)
    else:
        payload += 1
        dist.send(tensor=payload, dst=1)
    print('Rank ', rank, ' has data ', payload[0])
def init_processes(rank, size, hostname, fn, backend='tcp'):
    """Join the default process group and hand control to *fn*.

    NOTE(review): the 'tcp' default is a legacy backend name in modern
    torch.distributed; callers typically pass 'gloo' or 'nccl'.
    """
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size, hostname)
def set_seed(args):
    """Seed the python, numpy and torch RNGs from args.seed (CUDA too if GPUs are in use)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def to_list(tensor):
    """Return the tensor's values as a (nested) python list, detached and moved to CPU."""
    detached = tensor.detach()
    return detached.cpu().tolist()
def train(args, train_dataset, model, tokenizer):
    """Fine-tune *model* on *train_dataset* (SQuAD-style QA training loop).

    Supports gradient accumulation, fp16 via apex, DataParallel /
    DistributedDataParallel, checkpoint resumption (parsed from the
    model_name_or_path suffix), periodic logging to TensorBoard and periodic
    checkpoint saving.

    Returns:
        (global_step, average training loss).
    """
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    # Distributed training shards the data; single-process training shuffles it.
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # t_total = number of optimizer steps; max_steps overrides num_train_epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # No weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Resume optimizer/scheduler state when checkpoint files sit next to the model.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU single-process (DataParallel) vs multi-process (DDP).
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_dataset))
    logger.info(' Num Epochs = %d', args.num_train_epochs)
    logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Total optimization steps = %d', t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Resume position is encoded in the checkpoint dir name ('checkpoint-<step>').
    if os.path.exists(args.model_name_or_path):
        try:
            checkpoint_suffix = args.model_name_or_path.split('-')[(- 1)].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
            steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
            logger.info(' Continuing training from checkpoint, will skip to saved global_step')
            logger.info(' Continuing training from epoch %d', epochs_trained)
            logger.info(' Continuing training from global step %d', global_step)
            logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
        except ValueError:
            logger.info(' Starting fine-tuning.')
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    # Re-seed here for reproducibility between full runs and resumed runs.
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Fast-forward through batches already consumed before the resume point.
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'start_positions': batch[3], 'end_positions': batch[4]}
            # These model families do not use token_type_ids.
            if (args.model_type in ['xlm', 'roberta', 'distilbert', 'camembert']):
                del inputs['token_type_ids']
            # XLNet/XLM take extra inputs (cls_index/p_mask, optionally is_impossible and langs).
            if (args.model_type in ['xlnet', 'xlm']):
                inputs.update({'cls_index': batch[5], 'p_mask': batch[6]})
                if args.version_2_with_negative:
                    inputs.update({'is_impossible': batch[7]})
                if (hasattr(model, 'config') and hasattr(model.config, 'lang2id')):
                    inputs.update({'langs': (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)})
            outputs = model(**inputs)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once per accumulation window.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic TensorBoard logging (and optional mid-training eval).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing of model, tokenizer, args, optimizer, scheduler.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def evaluate(args, model, tokenizer, prefix=''):
    """Evaluate *model* on the SQuAD dev set and return the official metrics.

    Runs inference over the cached dev features, assembles SquadResult objects,
    writes prediction/n-best (and, for SQuAD v2, null-odds) JSON files into
    args.output_dir, and scores the predictions with squad_evaluate.

    Args:
        prefix: suffix appended to output file names (e.g. a checkpoint step).
    """
    (dataset, examples, features) = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
    if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(args.output_dir)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    # Evaluation is sequential so feature indices line up with the dataset.
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    all_results = []
    start_time = timeit.default_timer()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2]}
            # These model families do not use token_type_ids.
            if (args.model_type in ['xlm', 'roberta', 'distilbert', 'camembert']):
                del inputs['token_type_ids']
            feature_indices = batch[3]
            # XLNet/XLM take extra inputs (cls_index/p_mask, optionally langs).
            if (args.model_type in ['xlnet', 'xlm']):
                inputs.update({'cls_index': batch[4], 'p_mask': batch[5]})
                if (hasattr(model, 'config') and hasattr(model.config, 'lang2id')):
                    inputs.update({'langs': (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)})
            outputs = model(**inputs)
        for (i, feature_index) in enumerate(feature_indices):
            eval_feature = features[feature_index.item()]
            unique_id = int(eval_feature.unique_id)
            # Slice out this example's row from every model output tensor.
            output = [to_list(output[i]) for output in outputs]
            # 5+ outputs means beam-search style models (XLNet/XLM) with top-k
            # start/end indices and a CLS logit; otherwise plain start/end logits.
            if (len(output) >= 5):
                start_logits = output[0]
                start_top_index = output[1]
                end_logits = output[2]
                end_top_index = output[3]
                cls_logits = output[4]
                result = SquadResult(unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits)
            else:
                (start_logits, end_logits) = output
                result = SquadResult(unique_id, start_logits, end_logits)
            all_results.append(result)
    evalTime = (timeit.default_timer() - start_time)
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, (evalTime / len(dataset)))
    output_prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(prefix))
    output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions_{}.json'.format(prefix))
    if args.version_2_with_negative:
        output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds_{}.json'.format(prefix))
    else:
        output_null_log_odds_file = None
    # XLNet/XLM use a log-probability decoding scheme; others use plain logits.
    if (args.model_type in ['xlnet', 'xlm']):
        start_n_top = (model.config.start_n_top if hasattr(model, 'config') else model.module.config.start_n_top)
        end_n_top = (model.config.end_n_top if hasattr(model, 'config') else model.module.config.end_n_top)
        predictions = compute_predictions_log_probs(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging)
    else:
        predictions = compute_predictions_logits(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer)
    results = squad_evaluate(examples, predictions)
    return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Load SQuAD features from cache or build them from the raw data.

    In distributed training, only rank 0 builds the cache while other ranks
    wait at a barrier, then load the cached file.

    Args:
        evaluate: build the dev set when True, the train set otherwise.
        output_examples: also return the raw examples and features.

    Returns:
        dataset, or (dataset, examples, features) when output_examples is True.
    """
    # Non-first ranks wait here so only the first process builds the cache.
    if ((args.local_rank not in [(- 1), 0]) and (not evaluate)):
        torch.distributed.barrier()
    input_dir = (args.data_dir if args.data_dir else '.')
    cached_features_file = os.path.join(input_dir, 'cached_{}_{}_{}'.format(('dev' if evaluate else 'train'), list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length)))
    if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
        logger.info('Loading features from cached file %s', cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        (features, dataset, examples) = (features_and_dataset['features'], features_and_dataset['dataset'], features_and_dataset['examples'])
    else:
        logger.info('Creating features from dataset file at %s', input_dir)
        # With no local data files, fall back to tensorflow_datasets (SQuAD v1 only).
        if ((not args.data_dir) and ((evaluate and (not args.predict_file)) or ((not evaluate) and (not args.train_file)))):
            try:
                import tensorflow_datasets as tfds
            except ImportError:
                raise ImportError('If not data_dir is specified, tensorflow_datasets needs to be installed.')
            if args.version_2_with_negative:
                logger.warning('tensorflow_datasets does not handle version 2 of SQuAD.')
            tfds_examples = tfds.load('squad')
            examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
        else:
            processor = (SquadV2Processor() if args.version_2_with_negative else SquadV1Processor())
            if evaluate:
                examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)
            else:
                examples = processor.get_train_examples(args.data_dir, filename=args.train_file)
        (features, dataset) = squad_convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=(not evaluate), return_dataset='pt', threads=args.threads)
        if (args.local_rank in [(- 1), 0]):
            logger.info('Saving features into cached file %s', cached_features_file)
            torch.save({'features': features, 'dataset': dataset, 'examples': examples}, cached_features_file)
    # Rank 0 releases the other ranks once the cache exists.
    if ((args.local_rank == 0) and (not evaluate)):
        torch.distributed.barrier()
    if output_examples:
        return (dataset, examples, features)
    return dataset
def main():
    """Entry point for SQuAD fine-tuning/evaluation.

    Parses the CLI, sets up device/distributed state and logging, loads
    config/tokenizer/model, optionally trains and saves a checkpoint, then
    optionally evaluates one or more checkpoints.

    Returns:
        dict of evaluation results (empty if --do_eval was not requested).
    """
    parser = argparse.ArgumentParser()
    # Required parameters.
    parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_TYPES)))
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.')
    # Data inputs; falls back to tensorflow_datasets when none are given.
    parser.add_argument('--data_dir', default=None, type=str, help=('The input data dir. Should contain the .json files for the task.' + 'If no data dir or train/predict files are specified, will run with tensorflow_datasets.'))
    parser.add_argument('--train_file', default=None, type=str, help=('The input training file. If a data dir is specified, will look for the file there' + 'If no data dir or train/predict files are specified, will run with tensorflow_datasets.'))
    parser.add_argument('--predict_file', default=None, type=str, help=('The input evaluation file. If a data dir is specified, will look for the file there' + 'If no data dir or train/predict files are specified, will run with tensorflow_datasets.'))
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    # SQuAD v2 / feature-building knobs.
    parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.')
    parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.')
    parser.add_argument('--max_seq_length', default=384, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
    parser.add_argument('--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.')
    parser.add_argument('--max_query_length', default=64, type=int, help='The maximum number of tokens for the question. Questions longer than this will be truncated to this length.')
    # Run-mode flags.
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    # Optimization hyperparameters.
    parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    # Prediction decoding knobs.
    parser.add_argument('--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.')
    parser.add_argument('--max_answer_length', default=30, type=int, help='The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.')
    parser.add_argument('--verbose_logging', action='store_true', help='If true, all of the warnings related to data processing will be printed. A number of warnings are expected for a normal SQuAD evaluation.')
    parser.add_argument('--lang_id', default=0, type=int, help='language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)')
    # Logging, saving, environment.
    parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--threads', type=int, default=1, help='multiple threads for converting example to features')
    args = parser.parse_args()
    if (args.doc_stride >= (args.max_seq_length - args.max_query_length)):
        logger.warning("WARNING - You've set a doc stride which may be superior to the document length in some examples. This could result in errors when building features from the examples. Please reduce the doc stride or increase the maximum length to ensure the features are correctly built.")
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
        raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
    # Optional remote debugger hook (ptvsd).
    if (args.server_ip and args.server_port):
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Device / distributed setup: local_rank == -1 means single-process mode.
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool((args.local_rank != (- 1))), args.fp16)
    set_seed(args)
    # Only rank 0 downloads model/vocab; other ranks wait at the barrier.
    if (args.local_rank not in [(- 1), 0]):
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), cache_dir=(args.cache_dir if args.cache_dir else None))
    tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_name_or_path), do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))
    model = AutoModelForQuestionAnswering.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    if (args.local_rank == 0):
        torch.distributed.barrier()
    model.to(args.device)
    logger.info('Training/evaluation parameters %s', args)
    # apex fp16 needs einsum registered as a half-precision function.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        (global_step, tr_loss) = train(args, train_dataset, model, tokenizer)
        logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
    # Save the final model (rank 0 only), then reload it for evaluation.
    if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(args.output_dir)
        logger.info('Saving model checkpoint to %s', args.output_dir)
        model_to_save = (model.module if hasattr(model, 'module') else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir)
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)
    results = {}
    if (args.do_eval and (args.local_rank in [(- 1), 0])):
        if args.do_train:
            logger.info('Loading checkpoints saved during training for evaluation')
            checkpoints = [args.output_dir]
            if args.eval_all_checkpoints:
                checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(((args.output_dir + '/**/') + WEIGHTS_NAME), recursive=True))))
                logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
        else:
            logger.info('Loading checkpoint %s for evaluation', args.model_name_or_path)
            checkpoints = [args.model_name_or_path]
        logger.info('Evaluate the following checkpoints: %s', checkpoints)
        for checkpoint in checkpoints:
            # The checkpoint step (from 'checkpoint-<step>') keys the result names.
            global_step = (checkpoint.split('-')[(- 1)] if (len(checkpoints) > 1) else '')
            model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=global_step)
            result = dict((((k + ('_{}'.format(global_step) if global_step else '')), v) for (k, v) in result.items()))
            results.update(result)
    logger.info('Results: {}'.format(results))
    return results
class TextDataset(Dataset):
    """Language-modeling dataset of contiguous, block_size-token chunks of one file.

    Tokenizes the whole file, slices it into non-overlapping blocks (dropping
    the trailing remainder), and pickles the examples to a cache file next to
    the input so later runs skip tokenization.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):
        assert os.path.isfile(file_path)
        # Reserve room for the special tokens the tokenizer adds per sequence.
        block_size = (block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence))
        (directory, filename) = os.path.split(file_path)
        cached_features_file = os.path.join(directory, ((((args.model_type + '_cached_lm_') + str(block_size)) + '_') + filename))
        if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
            logger.info('Loading features from cached file %s', cached_features_file)
            with open(cached_features_file, 'rb') as handle:
                self.examples = pickle.load(handle)
        else:
            logger.info('Creating features from dataset file at %s', directory)
            self.examples = []
            with open(file_path, encoding='utf-8') as f:
                text = f.read()
            tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
            # Non-overlapping blocks; a partial final block is intentionally dropped.
            for i in range(0, ((len(tokenized_text) - block_size) + 1), block_size):
                self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:(i + block_size)]))
            logger.info('Saving features into cached file %s', cached_features_file)
            with open(cached_features_file, 'wb') as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        # Each item is one block of token ids as a long tensor.
        return torch.tensor(self.examples[item], dtype=torch.long)
class LineByLineTextDataset(Dataset):
    """Language-modeling dataset where each non-empty line of the file is one example."""

    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):
        assert os.path.isfile(file_path)
        logger.info('Creating features from dataset file at %s', file_path)
        with open(file_path, encoding='utf-8') as f:
            raw_lines = f.read().splitlines()
        # Drop empty and whitespace-only lines before tokenizing.
        kept = [ln for ln in raw_lines if ((len(ln) > 0) and (not ln.isspace()))]
        self.examples = tokenizer.batch_encode_plus(kept, add_special_tokens=True, max_length=block_size)['input_ids']

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return torch.tensor(self.examples[i], dtype=torch.long)
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the LM dataset for training or evaluation.

    Picks the eval or train file from *args* and wraps it in either the
    line-by-line dataset or the contiguous-block dataset, depending on
    args.line_by_line.
    """
    path = (args.eval_data_file if evaluate else args.train_data_file)
    dataset_cls = LineByLineTextDataset if args.line_by_line else TextDataset
    return dataset_cls(tokenizer, args, file_path=path, block_size=args.block_size)