code
stringlengths
17
6.64M
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
    """Build a GLUE metrics function for the given task.

    Args:
        task_name: GLUE task key into ``glue_output_modes``.

    Returns:
        A function mapping an ``EvalPrediction`` to a metrics dict.

    Raises:
        ValueError: if ``task_name`` is not a known GLUE task or its output
            mode is unrecognized.
    """
    try:
        output_mode = glue_output_modes[task_name]
    except KeyError:
        raise ValueError('Task not found: %s' % task_name)

    def compute_metrics_fn(p: EvalPrediction):
        if output_mode == 'classification':
            # The class with the highest logit wins.
            preds = np.argmax(p.predictions, axis=1)
        elif output_mode == 'regression':
            preds = np.squeeze(p.predictions)
        else:
            # Previously `preds` was left unbound here, producing a confusing
            # UnboundLocalError at call time; fail with a clear message instead.
            raise ValueError('Unknown output mode: %s' % output_mode)
        return glue_compute_metrics(task_name, preds, p.label_ids)

    return compute_metrics_fn
def glue_data_dir(DATA_DIR):
    """Return the path of the GLUE data directory under *DATA_DIR*."""
    return os.path.join(DATA_DIR, 'glue_data')
def make_just_y(ds, **kw):
    """Wrap only the labels of *ds* in a :class:`TensorDataset`."""
    labels = torch.tensor([feature.label for feature in ds])
    return TensorDataset(labels)
def get_extended_attention_mask(attention_mask, input_ids, dtype=torch.float32):
    """Extended attention mask, moving this preprocessing from inside the
    model (BERT) to the data pipeline.

    A missing (None) mask defaults to all-ones. The result broadcasts over
    attention scores: 0.0 where attending is allowed, -10000.0 where masked.
    """
    mask = attention_mask if attention_mask is not None else torch.ones_like(input_ids)
    extended = mask.unsqueeze(1).unsqueeze(2).to(dtype=dtype)
    return (1.0 - extended) * -10000.0
def make_just_x(ds, **kw):
    """Build a TensorDataset holding every feature field except the label.

    Args:
        ds: iterable of feature objects (fields are read via ``vars``).
        kw: must contain ``precompute_attention_mask``; when true, the 2-D
            attention mask is expanded to the additive 4-D form here instead
            of inside the model.
    """
    d = defaultdict(list)
    for feature in ds:
        for key, val in vars(feature).items():
            if key == 'label' or val is None:
                continue
            d[key].append(val)
    print(d.keys())
    if 'attention_mask' in d and kw['precompute_attention_mask']:
        print('-I- precomputing attention mask')
        # Bug fix: index the dict by key. The previous positional lookup
        # (list(d.values())[0]/[1]) silently depended on dict insertion order.
        input_ids = torch.tensor(d['input_ids'])
        attention_mask = torch.tensor(d['attention_mask'])
        d['attention_mask'] = get_extended_attention_mask(attention_mask, input_ids)
    tensors = []
    for x in d.values():
        # The precomputed mask is already a tensor; avoid torch.tensor(tensor),
        # which warns and makes a needless copy.
        tensors.append(x if isinstance(x, torch.Tensor) else torch.tensor(x))
    return TensorDataset(*tensors)
def make_just_by_ds(ds, just, **kw):
    # Build a TensorDataset containing only the feature fields named in `just`
    # (mapped through MAP_NAMES_TO_FEATURES). The last pipeline partition
    # additionally keeps the fields in LAST_PARTITION_EXTRA_LABELS.
    assert isinstance(just, list)
    A = set((MAP_NAMES_TO_FEATURES[i] for i in just))
    if kw['is_last_partition']:
        A |= LAST_PARTITION_EXTRA_LABELS
    d = defaultdict(list)
    for feature in ds:
        for (key, val) in vars(feature).items():
            if (val is None):
                continue
            if (key in A):
                d[key].append(val)
    print(d.keys())
    if ('attention_mask' in d):
        if kw['precompute_attention_mask']:
            # Expand the 2-D padding mask to the additive 4-D form outside the
            # model, to save per-step preprocessing / data transfers.
            print('-I- precomputing attention mask')
            b1 = torch.tensor(d['attention_mask'])
            if ('input_ids' in d):
                b0 = torch.tensor(d['input_ids'])
            else:
                # input_ids were filtered out by `just`; re-read them from ds.
                b0 = torch.tensor([feature.input_ids for feature in ds])
            attetion_mask = get_extended_attention_mask(b1, b0)
            d['attention_mask'] = attetion_mask
    ll = []
    for x in d.values():
        # The precomputed mask is already a tensor; everything else is a list.
        if (not isinstance(x, torch.Tensor)):
            x = torch.tensor(x)
        ll.append(x)
    return TensorDataset(*ll)
def getitem(t):
    """Recursively convert tensors / 0-d arrays to plain Python scalars.

    Dicts are converted value-wise; values without an ``item()`` method, or
    with more than one element, are returned unchanged.
    """
    if isinstance(t, dict):
        return {k: getitem(v) for k, v in t.items()}
    try:
        return t.item()
    except (AttributeError, ValueError, RuntimeError):
        # No .item() (plain Python value), or a multi-element tensor/array.
        # The old bare `except:` also swallowed KeyboardInterrupt and friends.
        return t
def get_just_x_or_y_train_dev_dataset(just, DATA_DIR, **kw):
    """Create GLUE train/dev datasets reduced to inputs ('x'), labels ('y'),
    or an explicit list of feature names, plus a trainer-patching callback.

    Returns:
        (train_ds, dev_ds, set_eval) where ``set_eval`` installs the GLUE
        loss and an ``evaluate_glue`` method on a trainer's statistics.
    """
    tokenizer = kw['tokenizer']
    task_name = kw['task_name']
    max_seq_length = kw['max_seq_length']
    overwrite_cache = kw['overwrite_cache']
    is_last_partition = kw.get('is_last_partition')
    precompute_attention_mask = kw['precompute_attention_mask']
    data_dir = os.path.join(DATA_DIR, TASK_NAME_TO_DATA_DIR[task_name])
    args = GlueDataTrainingArguments(task_name=task_name, data_dir=data_dir, max_seq_length=max_seq_length, overwrite_cache=overwrite_cache)
    print('-I- creating datasets...')
    train_ds = GlueDataset(args, tokenizer, mode='train')
    dev_ds = GlueDataset(args, tokenizer, mode='dev')
    # Pick the dataset-reduction strategy according to `just`.
    if (just == 'x'):
        just_f = make_just_x
    elif (just == 'y'):
        just_f = make_just_y
    elif isinstance(just, list):
        just_f = make_just_by_ds
    else:
        raise NotImplementedError()
    train_ds = just_f(train_ds, just=just, precompute_attention_mask=precompute_attention_mask, is_last_partition=is_last_partition)
    dev_ds = just_f(dev_ds, just=just, precompute_attention_mask=precompute_attention_mask, is_last_partition=is_last_partition)
    print('-I- done creating datasets')
    partial_evaluate = build_compute_metrics_fn(task_name)
    num_labels = glue_tasks_num_labels[task_name]

    def evaluate_glue(self):
        # Bound to trainer.statistics below; concatenates the buffered
        # predictions on CPU, computes GLUE metrics, and stores them per epoch.
        global_step = self.fit_res.num_epochs
        print('Evaluating Glue on CPU')
        predictions = torch.cat(self.predictions, dim=0).cpu().numpy()
        label_ids = torch.cat(self.label_ids, dim=0).cpu().numpy()
        self.predictions.clear()
        self.label_ids.clear()
        ep = EvalPrediction(predictions, label_ids)
        result = partial_evaluate(ep)
        try:
            print(result)
        except:
            print('evaluate_glue: failed to print result')
        if (not hasattr(self.fit_res, 'glue_results')):
            self.fit_res.glue_results = dict()
        self.fit_res.glue_results[global_step] = getitem(result)

    def set_eval(trainer):
        # Patch the trainer in place: GLUE loss + evaluation hook.
        trainer.loss_fn = GlueLoss(num_labels)
        trainer.statistics.evaluate_glue = types.MethodType(evaluate_glue, trainer.statistics)
        trainer.statistics.set_glue_task(task_name)
    return (train_ds, dev_ds, set_eval)
class SEP_GLUE_DatasetHandler(CommonDatasetHandler):
    """Dataset handler for separated (pipeline-partitioned) GLUE datasets."""

    def __init__(self, **kw):
        super().__init__()
        d = extract_needed_keywords(**kw)
        train_ds, dev_ds, extra = get_just_x_or_y_train_dev_dataset(**d)
        self.train_ds = train_ds
        self.dev_ds = dev_ds
        self.extra = extra  # trainer-patching callback

    def get_train_ds(self, **kw):
        return self.train_ds

    def get_test_ds(self, **kw):
        # The GLUE dev split serves as the test set here.
        return self.dev_ds

    def get_validation_ds(self, **kw):
        # Bug fix: the exception was instantiated but never raised, so the
        # method silently returned None.
        raise NotImplementedError()

    def get_modify_trainer_fn(self):
        return self.extra
def extract_needed_keywords(**kw):
    """Collect GLUE dataset-construction keywords from ``kw['args']``."""
    args = kw['args']
    on_last_stage = args.stage == (args.num_stages - 1)
    return dict(
        tokenizer=kw['tokenizer'],
        overwrite_cache=getattr(args, 'overwrite_cache', False),
        task_name=getattr(args, 'glue_task_name'),
        max_seq_length=getattr(args, 'max_seq_length', 128),
        precompute_masks=getattr(args, 'precompute_masks', False),
        precompute_attention_mask=getattr(args, 'precompute_attention_mask', False),
        is_last_partition=on_last_stage,
    )
class TextDataset(Dataset):
    """Language-modeling dataset: a text file tokenized and split into
    fixed-size blocks of token ids, cached on disk next to the source file.
    """

    def __init__(self, tokenizer, model_name_or_path, overwrite_cache=False, file_path='train', block_size=512):
        assert os.path.isfile(file_path), file_path
        directory, filename = os.path.split(file_path)
        # Cache name encodes the model and block size so different configs
        # do not clash.
        cached_features_file = os.path.join(
            directory,
            model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename)
        if os.path.exists(cached_features_file) and not overwrite_cache:
            # Fixed typo in the log message ('cahced' -> 'cached').
            print(f'Loading from cached feature file: {cached_features_file}')
            with open(cached_features_file, 'rb') as handle:
                self.examples = pickle.load(handle)
        else:
            self.examples = []
            with open(file_path, encoding='utf-8') as f:
                text = f.read()
            tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
            # Non-overlapping blocks; the trailing remainder (< block_size)
            # is dropped.
            for i in range(0, len(tokenized_text) - block_size + 1, block_size):
                self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:i + block_size]))
            with open(cached_features_file, 'wb') as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        return torch.tensor(self.examples[item])
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, mlm_probability=0.15, generator=None) -> Tuple[torch.Tensor, torch.Tensor]:
    """Prepare masked tokens inputs/labels for masked language modeling:
    80% MASK, 10% random, 10% original.

    Usage:
        inputs, labels = mask_tokens(batch, tokenizer, args.mlm_probability, generator) if args.mlm else (batch, batch)
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
    labels = inputs.clone()
    # Sample which positions to mask, never touching special or pad tokens.
    probability_matrix = torch.full(labels.shape, mlm_probability)
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    if tokenizer._pad_token is not None:
        padding_mask = labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix, generator=generator).bool()
    labels[~masked_indices] = -100  # loss is computed only on masked tokens
    # 80% of the time, replace the masked input token with [MASK].
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8), generator=generator).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, replace with a random word. Bug fix: this draw
    # previously ignored `generator`, breaking reproducibility of masking.
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5), generator=generator).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long, generator=generator)
    inputs[indices_random] = random_words[indices_random]
    # The remaining 10% keep the original tokens.
    return inputs, labels
def mask_tokens_just_inputs(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, mlm_probability=0.15, generator=None) -> torch.Tensor:
    """Prepare masked token *inputs* (only) for masked language modeling:
    80% MASK, 10% random, 10% original. `inputs` is modified in place.

    Note: the return annotation previously claimed a tuple; this variant
    returns only the (mutated) inputs tensor.
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
    labels = inputs  # alias: shape bookkeeping only, no copy
    probability_matrix = torch.full(labels.shape, mlm_probability)
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    if tokenizer._pad_token is not None:
        padding_mask = labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix, generator=generator).bool()
    # 80% of the time, replace the masked input token with [MASK].
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8), generator=generator).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # 10% of the time, replace with a random word. Bug fix: this draw
    # previously ignored `generator`, breaking reproducibility of masking.
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5), generator=generator).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long, generator=generator)
    inputs[indices_random] = random_words[indices_random]
    return inputs
def mask_tokens_just_labels(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, mlm_probability=0.15, generator=None) -> torch.Tensor:
    """Prepare masked LM *labels* (only): positions not selected for masking
    are set to -100 so the loss ignores them.

    Warning: ``labels`` aliases ``inputs`` — the input tensor is modified in
    place. The return annotation previously claimed a tuple; this variant
    returns only the labels tensor.
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
    labels = inputs  # alias: mutates the caller's tensor
    probability_matrix = torch.full(labels.shape, mlm_probability)
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    if tokenizer._pad_token is not None:
        padding_mask = labels.eq(tokenizer.pad_token_id)
        probability_matrix.masked_fill_(padding_mask, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix, generator=generator).bool()
    labels[~masked_indices] = -100  # loss is computed only on masked tokens
    return labels
def get_wikitext2_raw_train_valid_test_ds(model_name_or_path, tokenizer, block_size=512, overwrite_cache=False, DATA_DIR=DEFAULT_DATA_DIR, split='all'):
    """Build wikitext-2-raw TextDatasets for the requested split(s).

    split: 'all' returns (train, valid, test); 'train'/'valid'/'test'
    return that single dataset.
    """
    wt2_data_path = os.path.join(DATA_DIR, 'wikitext-2-raw')
    files = {name: os.path.join(wt2_data_path, f'wiki.{name}.raw')
             for name in ('train', 'valid', 'test')}

    def get_ds(file_path):
        return TextDataset(tokenizer, model_name_or_path, overwrite_cache=overwrite_cache, file_path=file_path, block_size=block_size)

    if split == 'all':
        return (get_ds(files['train']), get_ds(files['valid']), get_ds(files['test']))
    if split in files:
        return get_ds(files[split])
    raise ValueError(f'Unsupported split {split}.')
def get_wikitext2_raw_train_test_ds(model_name_or_path, tokenizer, train_seq_len=512, test_seq_len=512, overwrite_cache=False, DATA_DIR=DEFAULT_DATA_DIR):
    """Returns train and test datasets.

    Bug fix: DATA_DIR was accepted but never forwarded, so a non-default
    data directory was silently ignored.
    """
    train_ds = get_wikitext2_raw_train_valid_test_ds(model_name_or_path=model_name_or_path, tokenizer=tokenizer, split='train', block_size=train_seq_len, overwrite_cache=overwrite_cache, DATA_DIR=DATA_DIR)
    test_ds = get_wikitext2_raw_train_valid_test_ds(model_name_or_path=model_name_or_path, tokenizer=tokenizer, split='test', block_size=test_seq_len, overwrite_cache=overwrite_cache, DATA_DIR=DATA_DIR)
    return (train_ds, test_ds)
def get_wikitext2_raw_train_valid_ds(model_name_or_path, tokenizer, train_seq_len=512, valid_seq_len=512, overwrite_cache=False, DATA_DIR=DEFAULT_DATA_DIR):
    """Returns train and validation datasets.

    Bug fix: DATA_DIR was accepted but never forwarded, so a non-default
    data directory was silently ignored.
    """
    train_ds = get_wikitext2_raw_train_valid_test_ds(model_name_or_path=model_name_or_path, tokenizer=tokenizer, split='train', block_size=train_seq_len, overwrite_cache=overwrite_cache, DATA_DIR=DATA_DIR)
    valid_ds = get_wikitext2_raw_train_valid_test_ds(model_name_or_path=model_name_or_path, tokenizer=tokenizer, split='valid', block_size=valid_seq_len, overwrite_cache=overwrite_cache, DATA_DIR=DATA_DIR)
    return (train_ds, valid_ds)
def get_wikitext2_raw_test_ds(model_name_or_path, tokenizer, test_seq_len=512, overwrite_cache=False, DATA_DIR=DEFAULT_DATA_DIR):
    """Returns the test dataset.

    Bug fix: DATA_DIR was accepted but never forwarded, so a non-default
    data directory was silently ignored.
    """
    return get_wikitext2_raw_train_valid_test_ds(model_name_or_path=model_name_or_path, tokenizer=tokenizer, split='test', block_size=test_seq_len, overwrite_cache=overwrite_cache, DATA_DIR=DATA_DIR)
def lm_collate(tokenizer, examples: List[torch.Tensor]):
    """Pad a batch of variable-length id tensors into one (B, T) tensor,
    using the tokenizer's pad id when it has one (0 otherwise)."""
    pad_kwargs = {'batch_first': True}
    if tokenizer._pad_token is not None:
        pad_kwargs['padding_value'] = tokenizer.pad_token_id
    return pad_sequence(examples, **pad_kwargs)
def lm_collate_factory(tokenizer):
    """Bind *tokenizer* into an ``lm_collate``-based collate_fn."""
    assert tokenizer is not None
    return functools.partial(lm_collate, tokenizer)
def get_lm_train_dl(ds_train, bs_train, tokenizer=None, collate_fn=None, shuffle=True, **kw):
    """Build the LM training DataLoader.

    Bug fix: the `shuffle` parameter was previously ignored — a
    RandomSampler was always used. shuffle=False now yields sequential order.
    """
    collate = collate_fn if collate_fn else lm_collate_factory(tokenizer)
    # DataLoader forbids shuffle=True together with an explicit sampler,
    # so shuffling is expressed via the sampler choice.
    sampler = RandomSampler(ds_train) if shuffle else SequentialSampler(ds_train)
    return DataLoader(ds_train, shuffle=False, sampler=sampler, batch_size=bs_train, collate_fn=collate, **kw)
def get_lm_eval_dl(ds_eval, bs_eval, tokenizer=None, shuffle=False, collate_fn=None, **kw):
    """Build the LM evaluation DataLoader (sequential order).

    Bug fix: `DataLoader` was previously given `bs_eval` (the batch size)
    as its dataset instead of `ds_eval`, so evaluation iterated over the
    wrong object entirely.
    """
    collate = collate_fn if collate_fn else lm_collate_factory(tokenizer)
    eval_sampler = SequentialSampler(ds_eval)
    return DataLoader(ds_eval, sampler=eval_sampler, batch_size=bs_eval, collate_fn=collate, **kw)
def get_lm_train_valid_dl(ds_train, ds_test, bs_train, bs_test, tokenizer=None, **kw):
    """Build (train, valid) LM dataloaders sharing a single collate_fn."""
    if 'collate_fn' not in kw:
        kw['collate_fn'] = lm_collate_factory(tokenizer)
    return (get_lm_train_dl(ds_train, bs_train, **kw),
            get_lm_eval_dl(ds_test, bs_test, **kw))
class SEP_WIKITEXT2_DatasetHandler(CommonDatasetHandler):
    """Dataset handler for wikitext-2-raw LM train/test datasets."""

    def __init__(self, **kw):
        super().__init__()
        d = extract_needed_keywords(**kw)
        train_ds, test_ds = get_wikitext2_raw_train_test_ds(**d)
        self.train_ds = train_ds
        self.test_ds = test_ds
        tokenizer = kw['tokenizer']
        self.collate_fn = lm_collate_factory(tokenizer)

    def get_train_ds(self, **kw):
        return self.train_ds

    def get_test_ds(self, **kw):
        return self.test_ds

    def get_validation_ds(self, **kw):
        # Bug fix: the exception was instantiated but never raised, so the
        # method silently returned None.
        raise NotImplementedError()

    def modify_dataloader_keywords(self, dataloader_keywords):
        # LM batches need length padding; inject the tokenizer-aware collate_fn.
        dataloader_keywords['collate_fn'] = self.collate_fn
        return dataloader_keywords
def extract_needed_keywords(**kw):
    """Collect wikitext-2 dataset-construction keywords from ``kw['args']``."""
    args = kw['args']
    return dict(
        model_name_or_path=args.model_name_or_path,
        tokenizer=kw['tokenizer'],
        train_seq_len=args.train_seq_len,
        test_seq_len=args.test_seq_len,
        overwrite_cache=getattr(args, 'overwrite_cache', False),
    )
def load_and_cache_examples_just_x_or_y(just, model_name_or_path, max_seq_length, doc_stride, max_query_length, threads, tokenizer, DATA_DIR, evaluate=False, output_examples=False, overwrite_cache=True, save=False, version_2_with_negative=False, **kw):
    """Load (or build and optionally cache) a SQuAD dataset reduced to `just`.

    Returns the dataset, or ``(dataset, examples, features)`` when
    ``output_examples`` is true.
    """
    squad_dir = get_squad_dir(DATA_DIR, version_2_with_negative)
    input_dir = squad_dir
    train_file = get_train_file(squad_dir, version_2_with_negative)
    predict_file = get_predict_file(squad_dir, version_2_with_negative)
    # The cache file name encodes the reduction, split, model, max length and
    # whether this is the last pipeline partition ('lp') or not ('p').
    if isinstance(just, list):
        just_name_for_cached = '_'.join(just)
    else:
        just_name_for_cached = just
    cached_features_file = os.path.join(input_dir, 'cached_just_{}_{}_{}_{}_{}'.format(just_name_for_cached, ('dev' if evaluate else 'train'), list(filter(None, model_name_or_path.split('/'))).pop(), str(max_seq_length), ('lp' if kw['is_last_partition'] else 'p')))
    if (os.path.exists(cached_features_file) and (not overwrite_cache)):
        # NOTE(review): logger-style call passed to print; the '%s' is not
        # interpolated — both arguments are printed separated by a space.
        print('Loading features from cached file %s', cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        (features, dataset, examples) = (features_and_dataset['features'], features_and_dataset['dataset'], features_and_dataset['examples'])
    else:
        examples = make_examples(DATA_DIR, train_file, predict_file, evaluate, version_2_with_negative)
        # Skip the redundant cls_index / p_mask / is_impossible tensors.
        do_all_lw = dict(do_all_cls_index=False, do_all_p_mask=False, do_all_is_impossible=False)
        (features, dataset) = squad_convert_examples_to_features_just_x_or_y(just=just, examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=(not evaluate), return_dataset='pt', threads=threads, **do_all_lw, **kw)
        if save:
            print('Saving features into cached file %s', cached_features_file)
            torch.save({'features': features, 'dataset': dataset, 'examples': examples}, cached_features_file)
    if output_examples:
        return (dataset, examples, features)
    return dataset
def squad_convert_examples_to_features_just_x_or_y(just, examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset='pt', threads=1, do_all_cls_index=False, do_all_p_mask=False, do_all_is_impossible=False, **kw):
    """Convert SQuAD examples into features and a TensorDataset reduced to `just`.

    Args:
        just: 'x' (model inputs), 'y' (labels), or an explicit list of
            feature names to keep.
        examples: list of ``transformers`` SquadExample objects.
        tokenizer: a ``PreTrainedTokenizer`` subclass instance.
        max_seq_length / doc_stride / max_query_length: tokenization limits.
        is_training: whether features are for training (labels included).
        return_dataset: only 'pt' (a torch TensorDataset) is implemented.
        threads: worker processes for feature conversion.
        do_all_cls_index, do_all_p_mask, do_all_is_impossible: control
            creation of the (redundant) extra tensors.

    Returns:
        (features, dataset).

    TODO: the is_training=False path is only partially implemented
    (examples bookkeeping, etc).
    """
    features = []
    threads = min(threads, cpu_count())
    # Convert examples in parallel; the tokenizer is installed per-worker by
    # the initializer.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=is_training, padding_strategy='max_length')
        features = list(tqdm(p.imap(annotate_, examples, chunksize=32), total=len(examples), desc='convert squad examples to features'))
    # Flatten the per-example feature lists, assigning globally unique ids.
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(features, total=len(features), desc='add example index and unique id'):
        if (not example_features):
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if (return_dataset != 'pt'):
        raise NotImplementedError()
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_cls_index = (torch.tensor([f.cls_index for f in features], dtype=torch.long) if do_all_cls_index else None)
    all_p_mask = (torch.tensor([f.p_mask for f in features], dtype=torch.float) if do_all_p_mask else None)
    all_is_impossible = (torch.tensor([f.is_impossible for f in features], dtype=torch.float) if do_all_is_impossible else None)
    if (not is_training):
        # Evaluation datasets carry an example index so predictions can be
        # mapped back to their source example.
        if (just == 'x'):
            dataset = TensorDataset(*filter((lambda x: (x is not None)), [all_input_ids, all_attention_masks, all_token_type_ids, all_cls_index, all_p_mask]))
        elif (just == 'y'):
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(all_start_positions, all_end_positions, all_example_index)
        elif isinstance(just, list):
            # Named selection: 'input0'..'input4' are deprecated positional
            # aliases for the explicitly-named tensors.
            dd = dict(input0=all_input_ids, input1=all_attention_masks, input2=all_token_type_ids, input3=all_cls_index, input4=all_p_mask, attention_mask=all_attention_masks, input_ids=all_input_ids, token_type_ids=all_token_type_ids, cls_index=all_cls_index, p_mask=all_p_mask)
            d = {i: v for (i, v) in dd.items() if (i in just)}
            print(d.keys())
            update_on_precomputed_attention_mask(all_attention_masks, all_input_ids, d, kw)
            if kw['is_last_partition']:
                # Only the last pipeline partition needs the label tensors.
                all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
                all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
                all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
                d['all_start_positions'] = all_start_positions
                d['all_end_positions'] = all_end_positions
                d['all_example_index'] = all_example_index
            dataset = get_dataset_by_just(d, just)
        else:
            raise ValueError(f'just should be x or y, got {just}')
    elif (just == 'x'):
        dataset = TensorDataset(*filter((lambda x: (x is not None)), [all_input_ids, all_attention_masks, all_token_type_ids, all_cls_index, all_p_mask, all_is_impossible]))
    elif (just == 'y'):
        all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
        dataset = TensorDataset(all_start_positions, all_end_positions)
    elif isinstance(just, list):
        dd = dict(input0=all_input_ids, input1=all_attention_masks, input2=all_token_type_ids, input3=all_cls_index, input4=all_p_mask, input5=all_is_impossible, attention_mask=all_attention_masks, input_ids=all_input_ids, token_type_ids=all_token_type_ids, cls_index=all_cls_index, p_mask=all_p_mask)
        d = {i: v for (i, v) in dd.items() if (i in just)}
        print('keys in dataset', d.keys())
        update_on_precomputed_attention_mask(all_attention_masks, all_input_ids, d, kw)
        if kw['is_last_partition']:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            d['all_start_positions'] = all_start_positions
            d['all_end_positions'] = all_end_positions
        dataset = get_dataset_by_just(d, just)
    else:
        raise ValueError(f'got {just}')
    return (features, dataset)
def get_dataset_by_just(d, just):
    """TensorDataset with tensors ordered by `just`, followed by any label
    tensors present in *d* (start/end positions, example index)."""
    tensors = [d[name] for name in just]
    for extra in ('all_start_positions', 'all_end_positions', 'all_example_index'):
        if extra in d:
            tensors.append(d[extra])
    return TensorDataset(*tensors)
def train_just(just, DATA_DIR, **kw):
    """Load the SQuAD training dataset reduced to `just`."""
    return load_and_cache_examples_just_x_or_y(just=just, DATA_DIR=DATA_DIR, evaluate=False, output_examples=False, **kw)
def dev_just(just, DATA_DIR, **kw):
    """Load the SQuAD dev dataset reduced to `just`.

    Returns (dev_ds, examples, features)."""
    return load_and_cache_examples_just_x_or_y(just=just, DATA_DIR=DATA_DIR, evaluate=True, output_examples=True, **kw)
def getitem(t):
    """Recursively convert tensors / 0-d arrays to plain Python scalars.

    Dicts are converted value-wise; values without an ``item()`` method, or
    with more than one element, are returned unchanged.
    """
    if isinstance(t, dict):
        return {k: getitem(v) for k, v in t.items()}
    try:
        return t.item()
    except (AttributeError, ValueError, RuntimeError):
        # No .item() (plain Python value), or a multi-element tensor/array.
        # The old bare `except:` also swallowed KeyboardInterrupt and friends.
        return t
def get_just_x_or_y_train_dev_dataset(just, DATA_DIR, **kw):
    """Get SQuAD train/dev datasets reduced to x or y, plus a trainer hook.

    Returns:
        (train_ds, dev_ds, set_features) where ``set_features`` attaches the
        dev features and an ``evaluate_squad`` method to a trainer.
    """
    train_ds = load_and_cache_examples_just_x_or_y(just=just, DATA_DIR=DATA_DIR, evaluate=False, output_examples=False, **kw)
    print('squad', 'version_2_with_negative', kw['version_2_with_negative'])
    (dev_ds, examples, features) = load_and_cache_examples_just_x_or_y(just=just, DATA_DIR=DATA_DIR, evaluate=True, output_examples=True, **kw)
    if (len(dev_ds.tensors) == 0):
        # Bug fix: the warning used to claim TensorDataset *can* handle empty
        # datasets; dev_ds is replaced with None precisely because it can't.
        warnings.warn("setting dev_ds to None since pytorch TensorDataset can't handle empty datasets")
        dev_ds = None
    tokenizer = kw['tokenizer']
    args = SimpleNamespace(**kw)
    partial_evaluate = partial(evaluate, examples, features, tokenizer, args)

    def evaluate_squad(self):
        # Bound to trainer.statistics below; evaluates the accumulated
        # results once per epoch and stores the metrics.
        global_step = self.fit_res.num_epochs
        result = partial_evaluate(self.all_results, prefix=global_step)
        print(dict((((k + ('_{}'.format(global_step) if global_step else '')), getitem(v)) for (k, v) in result.items())))
        if (not hasattr(self.fit_res, 'squad_results')):
            self.fit_res.squad_results = dict()
        self.fit_res.squad_results[global_step] = getitem(result)

    def set_features(trainer):
        # Patch the trainer in place with the dev features + evaluation hook.
        trainer.features = features
        trainer.statistics.evaluate_squad = types.MethodType(evaluate_squad, trainer.statistics)
    return (train_ds, dev_ds, set_features)
def get_squad_dir(DATA_DIR, version_2_with_negative: bool):
    """Path of the SQuAD data directory: 'squad2' for v2, else 'squad1'."""
    subdir = 'squad2' if version_2_with_negative else 'squad1'
    return os.path.join(DATA_DIR, subdir)
def get_train_file(squad_dir, version_2_with_negative):
    """Path of the SQuAD training JSON for the given version."""
    filename = 'train-v2.0.json' if version_2_with_negative else 'train-v1.1.json'
    return os.path.join(squad_dir, filename)
def get_predict_file(squad_dir, version_2_with_negative):
    """Path of the SQuAD dev/prediction JSON for the given version."""
    filename = 'dev-v2.0.json' if version_2_with_negative else 'dev-v1.1.json'
    return os.path.join(squad_dir, filename)
def make_examples(DATA_DIR, train_file, predict_file, evaluate, version_2_with_negative):
    """Read SQuAD examples from JSON (used when no cache is loaded)."""
    processor = SquadV2Processor() if version_2_with_negative else SquadV1Processor()
    if evaluate:
        return processor.get_dev_examples(DATA_DIR, filename=predict_file)
    return processor.get_train_examples(DATA_DIR, filename=train_file)
def update_on_precomputed_attention_mask(all_attention_masks, all_input_ids, d, kw):
    """If requested via kw['precompute_attention_mask'], replace the 2-D mask
    entry in *d* ('attention_mask', or the deprecated 'input1') with the
    extended 4-D additive mask. Otherwise leaves *d* untouched."""
    has_mask_entry = ('input1' in d) or ('attention_mask' in d)
    if not has_mask_entry or not kw.get('precompute_attention_mask', False):
        return
    name = 'attention_mask'
    if 'input1' in d:
        warnings.warn('name input1 is deprecated.')
        name = 'input1'
        assert ('attention_mask' not in d)
    print('-I- precomputing attention mask to save data-transfers. (pre-processing should be outside the model)')
    d[name] = get_extended_attention_mask(all_attention_masks, all_input_ids)
def evaluate(examples, features, tokenizer, args, all_results, config=None, prefix=''):
    """Compute SQuAD predictions and metrics once all results are collected.

    Writes prediction / n-best (and, for v2, null-odds) JSON files into
    ``args.output_dir``, suffixed with *prefix*.

    TODO: replace args?
    """
    print('Evaluating Squad on CPU')
    if (not os.path.exists(args.output_dir)):
        os.makedirs(args.output_dir)
    output_prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(prefix))
    output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions_{}.json'.format(prefix))
    if args.version_2_with_negative:
        output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds_{}.json'.format(prefix))
    else:
        output_null_log_odds_file = None
    if (args.model_type in ['xlnet', 'xlm']):
        # XLNet/XLM emit log-probs over the top-n start/end positions, whose
        # counts live on the model config.
        if (config is None):
            raise ValueError('need transformer.config to infer few args...')
        start_n_top = config.start_n_top
        end_n_top = config.end_n_top
        predictions = compute_predictions_log_probs(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging)
    else:
        predictions = compute_predictions_logits(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer)
    results = squad_evaluate(examples, predictions)
    return results
def get_extended_attention_mask(attention_mask, input_ids, dtype=torch.float32):
    """Extended attention mask (BERT): preprocessing moved out of the model.

    None masks default to all-ones; returns an additive mask with 0.0 for
    attended positions and -10000.0 for masked ones.
    """
    if attention_mask is None:
        attention_mask = torch.ones_like(input_ids)
    expanded = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype=dtype)
    return (1.0 - expanded) * -10000.0
class SEP_SQUAD_DatasetHandler(CommonDatasetHandler):
    """Dataset handler for separated (pipeline-partitioned) SQuAD datasets."""

    def __init__(self, **kw):
        super().__init__()
        d = extract_needed_keywords(**kw)
        train_ds, test_ds, extra = get_just_x_or_y_train_dev_dataset(kw['just'], kw['DATA_DIR'], **d)
        self.train_ds = train_ds
        self.dev_ds = test_ds
        self.extra = extra  # trainer-patching callback

    def get_train_ds(self, **kw):
        return self.train_ds

    def get_test_ds(self, **kw):
        # The SQuAD dev split serves as the test set here.
        return self.dev_ds

    def get_validation_ds(self, **kw):
        # Bug fix: the exception was instantiated but never raised, so the
        # method silently returned None.
        raise NotImplementedError()

    def get_modify_trainer_fn(self):
        return self.extra
def extract_needed_keywords(**kw):
    """Collect all SQuAD dataset-construction and evaluation keywords from
    ``kw['args']``, inferring v2-with-negative from the dataset name."""
    args = kw['args']
    version_2_with_negative = (args.dataset == 'squad2')
    if hasattr(args, 'version_2_with_negative'):
        # A stale explicit flag that disagrees with the dataset name is a bug.
        assert (version_2_with_negative == args.version_2_with_negative), (version_2_with_negative, args.version_2_with_negative)
    else:
        print(f'-W- version_2_with_negative inferred automatically as {version_2_with_negative}. args.dataset: {args.dataset}.')
    dataset_keywords = dict(
        model_name_or_path=args.model_name_or_path,
        tokenizer=kw['tokenizer'],
        max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride,
        max_query_length=args.max_query_length,
        threads=args.threads,
        version_2_with_negative=version_2_with_negative,
        save=True,
        overwrite_cache=getattr(args, 'overwrite_cache', False),
        precompute_attention_mask=getattr(args, 'precompute_attention_mask', False),
    )
    dataset_keywords.update(
        n_best_size=getattr(args, 'n_best_size', 20),
        max_answer_length=getattr(args, 'max_answer_length', 30),
        do_lower_case=getattr(args, 'do_lower_case', False),
        verbose_logging=getattr(args, 'verbose_logging', False),
        version_2_with_negative=version_2_with_negative,
        null_score_diff_threshold=getattr(args, 'null_score_diff_threshold', 0.0),
        model_type=getattr(args, 'model_type'),
        output_dir=getattr(args, 'output_dir'),
        is_last_partition=(args.stage == (args.num_stages - 1)),
    )
    return dataset_keywords
def density(x):
    """Fraction of non-zero entries in array *x*."""
    total = np.prod(x.shape)
    return np.count_nonzero(x) / total
def analyze_packing(mixture_or_task_name, sequence_length, dataset_split='train', packed_ds=None):
    """Per-example packing statistics as a DataFrame (one row per packed example)."""
    if packed_ds is None:
        packed_ds = like_mtf(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split, pack=True)

    def create_record(x):
        return {
            'input_seq_length': x['inputs_position'].max() + 1,
            'target_seq_length': x['targets_position'].max() + 1,
            'npacked': x['targets_segmentation'].max(),
            'target_density': density(x['targets']),
            'input_density': density(x['inputs']),
        }

    records = [create_record(x) for x in packed_ds.as_numpy_iterator()]
    return pd.DataFrame.from_records(records)
def analyze_padding(mixture_or_task_name, sequence_length, dataset_split='train', padded_ds=None):
    """Per-example padding statistics for an unpacked t5 dataset, as a DataFrame.

    When `padded_ds` is None, builds the padded dataset via `like_mtf`.
    `npacked` is always 1 since nothing is packed.
    """
    if padded_ds is None:
        padded_ds = like_mtf(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split, pack=False)

    def record_of(ex):
        # Non-zero token count == actual (unpadded) sequence length.
        return {
            'input_seq_length': np.count_nonzero(ex['inputs']),
            'target_seq_length': np.count_nonzero(ex['targets']),
            'npacked': 1,
            'target_density': density(ex['targets']),
            'input_density': density(ex['inputs']),
        }

    records = [record_of(ex) for ex in padded_ds.as_numpy_iterator()]
    return pd.DataFrame.from_records(records)
def infer_no_truncation_padding_seq_length(df):
    """Smallest {'inputs','targets'} lengths that avoid any truncation for `df`."""
    longest_input = df['input_seq_length'].max()
    longest_target = df['target_seq_length'].max()
    return {'inputs': longest_input, 'targets': longest_target}
def infer_no_truncation_padding_seq_length_all_splits(mixture_or_task_name, sequence_length, splits=['train', 'validation']):
    """Max input/target lengths over all requested splits (no truncation)."""
    frames = []
    for dataset_split in splits:
        frames.append(analyze_padding(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split))
    combined = pd.concat(frames)
    return infer_no_truncation_padding_seq_length(combined)
def infer_no_truncation_padding_seq_length_for_all_t5_available_tasks():
    """Per-task sequence lengths required to avoid truncation, over train+validation."""
    sequence_length = {'inputs': 512, 'targets': 512}
    splits = ['train', 'validation']
    res = {}
    for task_name in t5_tasks_we_want():
        res[task_name] = infer_no_truncation_padding_seq_length_all_splits(task_name, sequence_length, splits=splits)
    return res
def t5_tasks_we_want():
    """Fixed list of t5 GLUE / SuperGLUE / SQuAD task names used by this project."""
    glue = ['glue_cola_v002', 'glue_sst2_v002', 'glue_qqp_v002', 'glue_mrpc_v002',
            'glue_stsb_v002', 'glue_qnli_v002', 'glue_rte_v002', 'glue_wnli_v002']
    super_glue = ['super_glue_boolq_v102', 'super_glue_cb_v102', 'super_glue_copa_v102',
                  'super_glue_multirc_v102', 'super_glue_record_v102', 'super_glue_rte_v102',
                  'super_glue_wic_v102']
    return glue + super_glue + ['squad_v010_allanswers']
def glue_rte_v002():
    """Packing vs padding analysis for GLUE RTE with its no-truncation target length."""
    name = 'glue_rte_v002'
    seq_len = {'inputs': 512, 'targets': 87}
    split = 'train'
    packed = analyze_packing(mixture_or_task_name=name, sequence_length=seq_len, dataset_split=split)
    padded = analyze_padding(mixture_or_task_name=name, sequence_length=seq_len, dataset_split=split)
    return (packed, padded)
def sum_task(mixture_or_task_name, dataset_split='train', add_percentiles=True):
    """Print packing vs padding describe() tables for one task and return a summary record.

    The record holds the max input/target lengths (no truncation), the mean
    number of packed examples, the example count, and optionally sequence-length
    percentiles from the padding analysis.
    """
    sequence_length = {'inputs': 512, 'targets': 512}
    df_packing = analyze_packing(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split)
    df_padding = analyze_padding(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split)
    print((40 * '='))
    print('-I- mixture_or_task_name', mixture_or_task_name)
    print('-I- packing:')
    print(df_packing.describe(percentiles=[0.5, 0.75, 0.9, 0.99]))
    print('-I- padding:')
    described_padding = df_padding.describe(percentiles=[0.5, 0.75, 0.9, 0.99])
    print(described_padding)
    splits = ['train']  # NOTE(review): unused variable, kept as-is.
    npacked = df_packing['npacked'].mean()
    ntrain = len(df_padding)
    sequence_length_req = infer_no_truncation_padding_seq_length(df_padding)
    record = {'mixture_or_task_name': mixture_or_task_name,
              'max_input': sequence_length_req['inputs'],
              'max_targets': sequence_length_req['targets'],
              'npacked': npacked,
              'examples': ntrain}
    if add_percentiles:
        # Pull the 50/75/90/99 percentile rows out of the describe() table.
        percs_input = {f'input_seq_length_{i}%': described_padding['input_seq_length'][f'{i}%'] for i in [50, 75, 90, 99]}
        percs_target = {f'target_seq_length_{i}%': described_padding['target_seq_length'][f'{i}%'] for i in [50, 75, 90, 99]}
        record.update(percs_input)
        record.update(percs_target)
    print('-I summary:')
    pprint(record)
    print((40 * '='))
    return record
def load_huggingface_checkpoint(args, cp_number, spread_across_devices=True, **kwargs):
    """Load a T5 model either from the HF hub ('c4') or from a saved pipeline checkpoint.

    cp_number: checkpoint index, or the string 'c4' to evaluate the pretrained
    model named by args.model_name_or_path without any finetuning.
    Returns (model, tokenizer, config).
    NOTE(review): `spread_across_devices` is accepted but not used in this
    (newer-loader) variant; placement is presumably handled by the caller.
    """
    hf_transformers_model_class = T5ForConditionalGeneration
    loader = NewT5HFLoader(hf_transformers_model_class=hf_transformers_model_class)
    if (cp_number == 'c4'):
        model_name_or_path = args.model_name_or_path
        print(f'-I- Will evaluate {model_name_or_path}, no further finetuining')
        (hugg, tokenizer, config) = loader.get_hf_original_model_tokenizer_and_config(model_name_or_path)
    else:
        # Saved pipeline checkpoint files are suffixed with the checkpoint number.
        add_to_prefix = f'_{cp_number}'
        (hugg, extra) = loader.load_from_saved_pipeline(args, to_original=True, add_to_prefix=add_to_prefix, **kwargs)
        config = extra['config']
        tokenizer = extra['tokenizer']
    return (hugg, tokenizer, config)
class T5Evaluator():
    """Evaluate saved T5 pipeline checkpoints on a t5/TFDS mixture or task.

    'Slightly patched with features'. Older variant built on the plain
    `transformers` T5 class; multi-device placement uses `.parallelize()`.
    """

    def __init__(self, args, model_dir, device, model: T5ForConditionalGeneration=None, spread_across_devices=True, use_existing_model_next_loads=True):
        super().__init__()
        # The model is loaded lazily, per checkpoint, via load_checkpoint().
        self._model: T5ForConditionalGeneration = None
        self._writer = SummaryWriter(model_dir)
        self._model_dir = model_dir
        if isinstance(device, str):
            device = torch.device(device)
        self._device = device
        self.spread_across_devices = spread_across_devices
        if (model is not None):
            self._model = model
            if (self._device.type == 'cuda'):
                self._model.to(device)
        # Convenience: create tensors directly on the evaluator's device.
        self.to_tensor = functools.partial(torch.as_tensor, device=self._device)
        self.args = args
        # When True, reuse the loaded model/tokenizer/config between
        # successive load_checkpoint() calls instead of re-instantiating.
        self.use_existing_model_next_loads = use_existing_model_next_loads

    def load_checkpoint(self, cp_number):
        """Load checkpoint `cp_number` ('c4' = pretrained only) into self._model."""
        use_existing = self.use_existing_model_next_loads
        kwargs = dict()
        if (use_existing and (self._model is not None) and (getattr(self, '_tokenizer', None) is not None) and (getattr(self, '_config', None) is not None)):
            try:
                kwargs['model'] = self._model
                kwargs['tokenizer'] = self._tokenizer
                kwargs['config'] = self._config
            except Exception as e:
                # Best effort: on any failure fall back to a fresh load.
                kwargs.pop('model', None)
                kwargs.pop('tokenizer', None)
                kwargs.pop('config', None)
        (hugg, tokenizer, config) = load_huggingface_checkpoint(args=self.args, spread_across_devices=self.spread_across_devices, cp_number=cp_number, **kwargs)
        self._model = hugg
        if use_existing:
            self._tokenizer = tokenizer
            self._config = config
        self._step = cp_number
        if self.spread_across_devices:
            assert isinstance(hugg, T5ForConditionalGeneration)
            hugg: T5ForConditionalGeneration
            # Spread model layers across available GPUs (HF model parallelism).
            hugg.parallelize(device_map=None)

    def get_all_checkpoint_steps(self):
        raise NotImplementedError()

    def eval(self, mixture_or_task_name, sequence_length, batch_size, checkpoint_steps=None, summary_dir=None, split='validation', **generate_kwargs):
        """Evaluate the model on the given t5 Mixture or Task.

        mixture_or_task_name: registered t5 task/mixture name.
        sequence_length: dict mapping feature name to max length.
        batch_size: number of padded sequences per batch.
        checkpoint_steps: int, list of ints, or 'all' (None is unsupported here).
        summary_dir: where to write TensorBoard/eval artifacts; defaults to
            model_dir/{split}_eval.
        split: dataset split to evaluate.
        **generate_kwargs: forwarded to `model.generate()`.
        Returns {checkpoint_step: {tag: metric_value}}.
        """
        import t5
        import tensorflow.compat.v1 as tf
        # Newer t5 versions renamed the helper to a private name.
        try:
            get_dataset = t5.models.hf_model.get_dataset
        except AttributeError:
            get_dataset = t5.models.hf_model._get_dataset
        mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
        vocab = mixture_or_task.output_features['targets'].vocabulary
        if isinstance(mixture_or_task, t5.data.Mixture):
            tasks = mixture_or_task.tasks
        elif isinstance(mixture_or_task, t5.data.Task):
            tasks = [mixture_or_task]
        else:
            raise NotImplementedError()
        for task in tasks:
            if (split not in task.splits):
                logging.info("Task %s has no '%s' split; skipping eval.", task.name, split)
        # NOTE(review): unlike the second T5Evaluator variant in this file,
        # `tasks` is NOT actually filtered here after logging — confirm intended.
        summary_dir = (summary_dir or os.path.join(self._model_dir, f'{split}_eval'))
        tf.io.gfile.makedirs(summary_dir)

        def _unbatch(batch):
            'Converts a dict of lists to a list of dicts of singletons.'
            return [dict(zip(batch, t)) for t in zip(*batch.values())]

        # Cache targets/batches once; they are reused for every checkpoint.
        cached_targets = {}
        cached_examples = {}
        for task in tasks:
            if task.metric_fns:
                ds = get_dataset(task.name, sequence_length, split, batch_size)
                batches = list(ds)
                if (not batches):
                    raise ValueError(f"The '{split}' split of {task.name} is empty.")
                examples = [ex for b in batches for ex in _unbatch(b)]
                targets = [task.postprocess_fn(tf.compat.as_text(ex['targets_plaintext']), example=ex, is_target=True) for ex in examples]
                targets_filename = os.path.join(summary_dir, f'{task.name}_targets')
                write_lines_to_file(targets, targets_filename)
                inputs_filename = os.path.join(summary_dir, f'{task.name}_inputs')
                inputs = [ex['inputs_plaintext'] for ex in examples]
                write_lines_to_file(inputs, inputs_filename)
                cached_targets[task.name] = targets
                cached_examples[task.name] = batches

        def _eval_current_model():
            # Generate predictions for the currently loaded checkpoint and
            # score them with each task's metric functions.
            self._model.eval()
            all_scores = {}
            for task in tasks:
                ds = cached_examples[task.name]
                targets = cached_targets[task.name]
                predictions = []
                for batch in tqdm(ds, desc='Evaluating'):
                    predicted_tokens = self._model.generate(input_ids=self.to_tensor(batch['inputs']), **generate_kwargs)
                    predicted_tokens = predicted_tokens.cpu().numpy().tolist()
                    predictions.extend([task.postprocess_fn(vocab.decode(p), example=ex) for (p, ex) in zip(predicted_tokens, _unbatch(batch))])
                if (len(targets) != len(predictions)):
                    raise ValueError(f'#targets ({len(targets)}) != #predictions ({len(predictions)})')
                predictions_file = os.path.join(summary_dir, f'{task.name}_{self._step}_predictions')
                write_lines_to_file(predictions, predictions_file)
                for metric_fn in task.metric_fns:
                    scores = metric_fn(targets, predictions)
                    for (metric_name, metric_value) in scores.items():
                        tag = f'eval/{task.name}/{metric_name}'
                        # Non-integer steps (e.g. 'c4') are logged at step -1.
                        step = (self._step if isinstance(self._step, int) else (- 1))
                        self._writer.add_scalar(tag, metric_value, step)
                        logging.info(f'{tag} at step {step}: {metric_value:.3f}')
                        all_scores[tag] = metric_value
            self._writer.flush()
            return all_scores

        if (checkpoint_steps is None):
            raise NotImplementedError()
        elif isinstance(checkpoint_steps, int):
            checkpoint_steps = [checkpoint_steps]
        elif (checkpoint_steps == 'all'):
            checkpoint_steps = self.get_all_checkpoint_steps()
        elif (not isinstance(checkpoint_steps, (list, tuple))):
            raise ValueError(f'checkpoint_steps must be None, int or list; got {checkpoint_steps}')
        all_results = {}
        for checkpoint_step in checkpoint_steps:
            try:
                self.load_checkpoint(checkpoint_step)
                results = _eval_current_model()
                print('partial result:', checkpoint_step, results)
                all_results[checkpoint_step] = results
            except Exception as e:
                # Keep going so one broken checkpoint doesn't abort the sweep.
                warnings.warn(f'ignoring exception {str(e)}')
        if (len(all_results) == 0):
            raise RuntimeError('could not evaluate any checkpoint')
        return all_results
def write_lines_to_file(lines, filename):
    """Write each line to filename, replacing the file if it exists."""
    # BUGFIX: the docstring above was originally placed AFTER the import,
    # making it a no-op string statement instead of the function's docstring.
    import tensorflow.compat.v1 as tf
    if tf.io.gfile.exists(filename):
        tf.io.gfile.remove(filename)
    with tf.io.gfile.GFile(filename, 'w') as output_file:
        output_file.write('\n'.join([str(l) for l in lines]))
def get_t5_sequence_length_from_args(args):
    """Build the t5 sequence_length dict from args lengths."""
    inputs_len = args.max_seq_length
    targets_len = args.answer_max_seq_length
    return {'inputs': inputs_len, 'targets': targets_len}
def evaluate_t5_tfds(args, cp_number, device='cpu'):
    """Evaluate saved T5 checkpoint(s) on the TFDS mixture named in args."""
    base_dir = 'results/t5_eval_dir/'
    model_dir = os.path.join(base_dir, auto_file_name(args))
    evaluator = T5Evaluator(args, model_dir=model_dir, device=device, model=None)
    return evaluator.eval(
        mixture_or_task_name=args.mixture_or_task_name,
        sequence_length=get_t5_sequence_length_from_args(args),
        batch_size=getattr(args, 'single_worker_eval_batch_size', 32),
        checkpoint_steps=cp_number,
        split='validation',
        summary_dir=None,
        **getattr(args, 'generate_kwargs', {}),
    )
def load_huggingface_checkpoint(args, cp_number, spread_across_devices=True, **kwargs):
    """Load a T5 model either from the HF hub ('c4') or from a saved pipeline checkpoint.

    When `spread_across_devices` is True, uses the model-parallel T5 class so
    the caller can later spread layers over multiple devices.
    cp_number: checkpoint index, or 'c4' to evaluate the pretrained model
    named by args.model_name_or_path without finetuning.
    Returns (model, tokenizer, config).
    """
    if spread_across_devices:
        hf_transformers_model_class = ModelParallelT5ForConditionalGeneration
    else:
        hf_transformers_model_class = T5ForConditionalGeneration
    loader = T5HFLoader(hf_transformers_model_class=hf_transformers_model_class)
    if (cp_number == 'c4'):
        model_name_or_path = args.model_name_or_path
        print(f'-I- Will evaluate {model_name_or_path}, no further finetuining')
        (hugg, tokenizer, config) = loader.get_hf_original_model_tokenizer_and_config(model_name_or_path)
    else:
        # Saved pipeline checkpoint files are suffixed with the checkpoint number.
        add_to_prefix = f'_{cp_number}'
        (hugg, extra) = loader.load_from_saved_pipeline(args, to_original=True, add_to_prefix=add_to_prefix, **kwargs)
        config = extra['config']
        tokenizer = extra['tokenizer']
    return (hugg, tokenizer, config)
class T5Evaluator():
    """Evaluate saved T5 pipeline checkpoints on a t5/TFDS mixture or task.

    'Slightly patched with features'. Newer variant: uses the model-parallel
    T5 wrapper (`spread_on_devices`) and filters tasks missing the split.
    """

    def __init__(self, args, model_dir, device, model: T5ForConditionalGeneration=None, spread_across_devices=True, use_existing_model_next_loads=True):
        super().__init__()
        # The model is loaded lazily, per checkpoint, via load_checkpoint().
        self._model: T5ForConditionalGeneration = None
        self._writer = torch.utils.tensorboard.writer.SummaryWriter(model_dir)
        self._model_dir = model_dir
        if isinstance(device, str):
            device = torch.device(device)
        self._device = device
        self.spread_across_devices = spread_across_devices
        if (model is not None):
            self._model = model
            if (self._device.type == 'cuda'):
                self._model.to(device)
        # Convenience: create tensors directly on the evaluator's device.
        self.to_tensor = functools.partial(torch.as_tensor, device=self._device)
        self.args = args
        # When True, reuse the loaded model/tokenizer/config between
        # successive load_checkpoint() calls instead of re-instantiating.
        self.use_existing_model_next_loads = use_existing_model_next_loads

    def load_checkpoint(self, cp_number):
        """Load checkpoint `cp_number` ('c4' = pretrained only) into self._model."""
        use_existing = self.use_existing_model_next_loads
        kwargs = dict()
        if (use_existing and (self._model is not None) and (getattr(self, '_tokenizer', None) is not None) and (getattr(self, '_config', None) is not None)):
            try:
                kwargs['model'] = self._model
                kwargs['tokenizer'] = self._tokenizer
                kwargs['config'] = self._config
            except Exception as e:
                # Best effort: on any failure fall back to a fresh load.
                kwargs.pop('model', None)
                kwargs.pop('tokenizer', None)
                kwargs.pop('config', None)
        (hugg, tokenizer, config) = load_huggingface_checkpoint(args=self.args, spread_across_devices=self.spread_across_devices, cp_number=cp_number, **kwargs)
        self._model = hugg
        if use_existing:
            self._tokenizer = tokenizer
            self._config = config
        self._step = cp_number
        if self.spread_across_devices:
            assert isinstance(hugg, ModelParallelT5ForConditionalGeneration)
            hugg: ModelParallelT5ForConditionalGeneration
            # Spread model layers across all visible devices.
            hugg.spread_on_devices(devices=None)
        if (self._device.type == 'cuda'):
            self._model.to(self._device)

    def get_all_checkpoint_steps(self):
        raise NotImplementedError()

    def eval(self, mixture_or_task_name, sequence_length, batch_size, checkpoint_steps=None, summary_dir=None, split='validation', **generate_kwargs):
        """Evaluate the model on the given t5 Mixture or Task.

        mixture_or_task_name: registered t5 task/mixture name.
        sequence_length: dict mapping feature name to max length.
        batch_size: number of padded sequences per batch.
        checkpoint_steps: int, list of ints, or 'all' (None is unsupported here).
        summary_dir: where to write eval artifacts; defaults to
            model_dir/{split}_eval.
        split: dataset split to evaluate.
        **generate_kwargs: forwarded to `model.generate()`.
        Returns {checkpoint_step: {tag: metric_value}}.
        """
        import t5
        import tensorflow.compat.v1 as tf
        get_dataset = t5.models.hf_model.get_dataset
        mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
        vocab = mixture_or_task.output_features['targets'].vocabulary
        if isinstance(mixture_or_task, t5.data.Mixture):
            tasks = mixture_or_task.tasks
        elif isinstance(mixture_or_task, t5.data.Task):
            tasks = [mixture_or_task]
        else:
            raise NotImplementedError()
        for task in tasks:
            if (split not in task.splits):
                logging.info("Task %s has no '%s' split; skipping eval.", task.name, split)
        # Actually drop tasks that lack the requested split.
        tasks = [task for task in tasks if (split in task.splits)]
        summary_dir = (summary_dir or os.path.join(self._model_dir, f'{split}_eval'))
        tf.io.gfile.makedirs(summary_dir)

        def _unbatch(batch):
            'Converts a dict of lists to a list of dicts of singletons.'
            return [dict(zip(batch, t)) for t in zip(*batch.values())]

        # Cache targets/batches once; they are reused for every checkpoint.
        cached_targets = {}
        cached_examples = {}
        for task in tasks:
            if task.metric_fns:
                ds = get_dataset(task.name, sequence_length, split, batch_size)
                batches = list(ds)
                if (not batches):
                    raise ValueError(f"The '{split}' split of {task.name} is empty.")
                examples = [ex for b in batches for ex in _unbatch(b)]
                targets = [task.postprocess_fn(tf.compat.as_text(ex['targets_plaintext']), example=ex, is_target=True) for ex in examples]
                targets_filename = os.path.join(summary_dir, f'{task.name}_targets')
                write_lines_to_file(targets, targets_filename)
                inputs_filename = os.path.join(summary_dir, f'{task.name}_inputs')
                inputs = [ex['inputs_plaintext'] for ex in examples]
                write_lines_to_file(inputs, inputs_filename)
                cached_targets[task.name] = targets
                cached_examples[task.name] = batches

        def _eval_current_model():
            # Generate predictions for the currently loaded checkpoint and
            # score them with each task's metric functions.
            self._model.eval()
            all_scores = {}
            for task in tasks:
                ds = cached_examples[task.name]
                targets = cached_targets[task.name]
                predictions = []
                for batch in tqdm(ds, desc='Evaluating'):
                    predicted_tokens = self._model.generate(input_ids=self.to_tensor(batch['inputs']), **generate_kwargs)
                    predicted_tokens = predicted_tokens.cpu().numpy().tolist()
                    predictions.extend([task.postprocess_fn(vocab.decode(p), example=ex) for (p, ex) in zip(predicted_tokens, _unbatch(batch))])
                if (len(targets) != len(predictions)):
                    raise ValueError(f'#targets ({len(targets)}) != #predictions ({len(predictions)})')
                predictions_file = os.path.join(summary_dir, f'{task.name}_{self._step}_predictions')
                write_lines_to_file(predictions, predictions_file)
                for metric_fn in task.metric_fns:
                    scores = metric_fn(targets, predictions)
                    for (metric_name, metric_value) in scores.items():
                        tag = f'eval/{task.name}/{metric_name}'
                        # Non-integer steps (e.g. 'c4') are logged at step -1.
                        step = (self._step if isinstance(self._step, int) else (- 1))
                        self._writer.add_scalar(tag, metric_value, step)
                        logging.info(f'{tag} at step {step}: {metric_value:.3f}')
                        all_scores[tag] = metric_value
            self._writer.flush()
            return all_scores

        if (checkpoint_steps is None):
            raise NotImplementedError()
        elif isinstance(checkpoint_steps, int):
            checkpoint_steps = [checkpoint_steps]
        elif (checkpoint_steps == 'all'):
            checkpoint_steps = self.get_all_checkpoint_steps()
        elif (not isinstance(checkpoint_steps, (list, tuple))):
            raise ValueError(f'checkpoint_steps must be None, int or list; got {checkpoint_steps}')
        all_results = {}
        for checkpoint_step in checkpoint_steps:
            try:
                self.load_checkpoint(checkpoint_step)
                results = _eval_current_model()
                print('partial result:', checkpoint_step, results)
                all_results[checkpoint_step] = results
            except Exception as e:
                # Keep going so one broken checkpoint doesn't abort the sweep.
                warnings.warn(f'ignoring exception {str(e)}')
        if (len(all_results) == 0):
            raise RuntimeError('could not evaluate any checkpoint')
        return all_results
def write_lines_to_file(lines, filename):
    """Write each line to filename, replacing the file if it exists."""
    # BUGFIX: the docstring above was originally placed AFTER the import,
    # making it a no-op string statement instead of the function's docstring.
    import tensorflow.compat.v1 as tf
    if tf.io.gfile.exists(filename):
        tf.io.gfile.remove(filename)
    with tf.io.gfile.GFile(filename, 'w') as output_file:
        output_file.write('\n'.join([str(l) for l in lines]))
def get_t5_sequence_length_from_args(args):
    """Map args lengths onto the t5 sequence_length dict shape."""
    return dict(inputs=args.max_seq_length, targets=args.answer_max_seq_length)
def evaluate_t5_tfds(args, cp_number, device='cpu'):
    """Evaluate saved T5 checkpoint(s) on the TFDS mixture named in args."""
    model_dir = os.path.join('results/t5_eval_dir/', auto_file_name(args))
    batch_size = getattr(args, 'single_worker_eval_batch_size', 32)
    generate_kwargs = getattr(args, 'generate_kwargs', {})
    evaluator = T5Evaluator(args, model_dir=model_dir, device=device, model=None)
    sequence_length = get_t5_sequence_length_from_args(args)
    results = evaluator.eval(mixture_or_task_name=args.mixture_or_task_name,
                             sequence_length=sequence_length,
                             batch_size=batch_size,
                             checkpoint_steps=cp_number,
                             split='validation',
                             summary_dir=None,
                             **generate_kwargs)
    return results
def get_transformations(mean, std, resize_size, crop_size, mode='train', jit_script=False):
    """Build the train or eval image transform pipeline.

    Train: resize -> random crop -> random h-flip -> tensor -> normalize.
    Eval: resize directly to crop size -> tensor -> normalize.
    With jit_script=True the ops are wrapped in nn.Sequential and scripted.
    """
    if mode == 'train':
        ops = [
            torchvision.transforms.Resize((resize_size, resize_size)),
            torchvision.transforms.RandomCrop((crop_size, crop_size)),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std),
        ]
    else:
        ops = [
            torchvision.transforms.Resize((crop_size, crop_size)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std),
        ]
    if jit_script:
        return torch.jit.script(torch.nn.Sequential(*ops))
    return torchvision.transforms.Compose(ops)
def cifar10_transformations(jit_script=False, resize_size=384, crop_size=384):
    """Return (train_transform, test_transform) for CIFAR-10 at the given geometry."""
    common = dict(mean=np.array(CIFAR10_DEFAULT_MEAN),
                  std=np.array(CIFAR10_DEFAULT_STD),
                  crop_size=crop_size,
                  resize_size=resize_size,
                  jit_script=jit_script)
    return (get_transformations(mode='train', **common),
            get_transformations(mode='test', **common))
def cifar100_transformations(jit_script=False, resize_size=384, crop_size=384):
    """Return (train_transform, test_transform) for CIFAR-100 at the given geometry."""
    common = dict(mean=np.array(CIFAR100_DEFAULT_MEAN),
                  std=np.array(CIFAR100_DEFAULT_STD),
                  crop_size=crop_size,
                  resize_size=resize_size,
                  jit_script=jit_script)
    return (get_transformations(mode='train', **common),
            get_transformations(mode='test', **common))
def imagenet_transformations(jit_script=False, resize_size=384, crop_size=384):
    """Return (train_transform, test_transform) for ImageNet at the given geometry."""
    common = dict(mean=IMAGENET_DEFAULT_MEAN,
                  std=IMAGENET_DEFAULT_STD,
                  crop_size=crop_size,
                  resize_size=resize_size,
                  jit_script=jit_script)
    return (get_transformations(mode='train', **common),
            get_transformations(mode='test', **common))
def sep_imagenet_handler_factory(resize_size=384, crop_size=384):
    """Build a CommonDatasetHandler subclass for ImageNet at the given geometry."""

    class SepImagenetAutoGenDatasetHandler(CommonDatasetHandler):
        def __init__(self, **kw):
            super().__init__()

        def get_train_ds(self, **kw):
            (train_transform, _) = imagenet_transformations(resize_size=resize_size, crop_size=crop_size)
            return get_imagenet_just_x_or_y_ds(transform=train_transform, train=True, **kw)

        def get_test_ds(self, **kw):
            (_, test_transform) = imagenet_transformations(resize_size=resize_size, crop_size=crop_size)
            return get_imagenet_just_x_or_y_ds(transform=test_transform, train=False, **kw)

        def get_validation_ds(self, **kw):
            # BUGFIX: was `NotImplementedError()` without `raise` — it built the
            # exception object and silently returned None.
            raise NotImplementedError()

    return SepImagenetAutoGenDatasetHandler
class SepCifar10_384_DatasetHandler(CommonDatasetHandler):
    """Dataset handler for CIFAR-10 upscaled to 384x384 (separated x/y datasets)."""

    def __init__(self, **kw):
        super().__init__()

    def get_train_ds(self, **kw):
        (train_transform, _) = cifar10_transformations(resize_size=384, crop_size=384)
        return get_cifar_10_just_x_or_y_ds(transform=train_transform, train=True, **kw)

    def get_test_ds(self, **kw):
        (_, test_transform) = cifar10_transformations(resize_size=384, crop_size=384)
        return get_cifar_10_just_x_or_y_ds(transform=test_transform, train=False, **kw)

    def get_validation_ds(self, **kw):
        # BUGFIX: was `NotImplementedError()` without `raise` — it built the
        # exception object and silently returned None.
        raise NotImplementedError()
class SepCifar100_384_DatasetHandler(CommonDatasetHandler):
    """Dataset handler for CIFAR-100 upscaled to 384x384 (separated x/y datasets)."""

    def __init__(self, **kw):
        super().__init__()

    def get_train_ds(self, **kw):
        (train_transform, _) = cifar100_transformations(resize_size=384, crop_size=384)
        return get_cifar_100_just_x_or_y_ds(transform=train_transform, train=True, **kw)

    def get_test_ds(self, **kw):
        (_, test_transform) = cifar100_transformations(resize_size=384, crop_size=384)
        return get_cifar_100_just_x_or_y_ds(transform=test_transform, train=False, **kw)

    def get_validation_ds(self, **kw):
        # BUGFIX: was `NotImplementedError()` without `raise` — it built the
        # exception object and silently returned None.
        raise NotImplementedError()
def infer_all_cps(args) -> int:
    """Estimate how many checkpoints a finished training run produced.

    Epoch-based runs: one checkpoint per epoch. Step-based runs: rebuild the
    partitioning config and training dataloader to learn the epoch length,
    then simulate epochs until the step budget is spent, counting checkpoint
    epochs. Mutates `args` (rank/stage fields) as a side effect.
    """
    if (args.epochs > 0):
        n_cps = args.epochs
        if (getattr(args, 'save_checkpoint_every_x_steps', None) is not None):
            warnings.warn(f'Miss-Estimated number of checkpoints due args.save_checkpoint_every_x_steps={args.save_checkpoint_every_x_steps}')
    elif (args.steps > 0):
        # Pretend to be the single (rank 0) worker so the partitioning config
        # and dataloaders can be constructed outside the training job.
        local_rank = 0
        args.rank = 0
        args.local_rank = local_rank
        args.is_multiprocessing_worker = False
        handler = AVAILABLE_MODELS.get(args.model)
        parsed_config = parse_config.PartitioningConfigParser(args.model, args.rank, args.bs_train, args.bs_test, handler=None, send_target_in_pipe=('_nonsep' in args.data_propagator), prefer_seq_sends=getattr(args, 'prefer_seq_sends', True))
        dataset_keywords = {}
        extra_kw = handler.get_extra()
        if isinstance(extra_kw, dict):
            dataset_keywords.update(extra_kw)
        del handler
        pipe_config = parsed_config.pipe_config
        args.num_stages = parsed_config.num_stages
        args.stage = parsed_config.stage_id
        from pipe.data import get_dataloaders
        (train_dl, test_dl, samplers, extra) = get_dataloaders(args, pipe_config=pipe_config, dataset_keywords=dataset_keywords)
        train_dl_len = len(train_dl)
        save_checkpoint_every_x_epochs = approximate_checkpoint_every_x_epochs(args, train_dl_len)
        # Simulate the training loop: each epoch consumes train_dl_len batches.
        left_batches = (args.steps * args.step_every)
        n = 0
        n_cps = 0
        while (left_batches > 0):
            left_batches -= train_dl_len
            n += 1
            if ((n % save_checkpoint_every_x_epochs) == 0):
                n_cps += 1
    else:
        raise NotImplementedError()
    return n_cps
def get_all_eval_results(args):
    """Evaluate either an explicitly requested checkpoint or all inferred ones.

    Returns a dict mapping checkpoint number -> evaluation results. Only the
    't5_tfds' dataset path is implemented.
    """
    explicit_eval_cp = getattr(args, 'explicit_eval_cp', None)
    if (explicit_eval_cp is not None):
        all_cps = [explicit_eval_cp]
        print(f'Got explicit_eval_cp={explicit_eval_cp}. changing out_file_name')
        args.out_filename = ((explicit_eval_cp + '_') + args.out_filename)
    else:
        # No explicit checkpoint: evaluate every checkpoint the run produced.
        all_cps = list(range(0, infer_all_cps(args)))
    print(f'-I- evaluating {len(all_cps)}: {all_cps}')
    if (args.dataset == 't5_tfds'):
        device = getattr(args, 'eval_device', 'cpu')
        if (not isinstance(device, list)):
            import transformers
            # NOTE(review): lexicographic string comparison of versions —
            # breaks for e.g. '10.x' vs '4.1.1'. Works only within the 4.x
            # range it was written for; consider packaging.version instead.
            if (transformers.__version__ > '4.1.1'):
                from pipe.data.t5 import new_t5_tfds_eval
                all_results = pipe.data.t5.new_t5_tfds_eval.evaluate_t5_tfds(args, cp_number=all_cps, device=device)
            else:
                from pipe.data.t5 import t5_tfds_eval
                all_results = pipe.data.t5.t5_tfds_eval.evaluate_t5_tfds(args, cp_number=all_cps, device=device)
        else:
            raise NotImplementedError()
    else:
        raise NotImplementedError()
    return all_results
def is_json(fn):
    """True when `fn` names a JSON results file.

    BUGFIX: the original substring test (`'.json' in fn`) also matched
    '.jsonl' files and any path merely containing '.json' anywhere.
    """
    return fn.endswith('.json')
def all_files(path):
    """Recursively collect paths of JSON files under `path`."""
    collected = []
    for (root, _dirs, files) in os.walk(path, topdown=True):
        collected.extend(os.path.join(root, name) for name in files if is_json(name))
    return collected
class InferStuff():
    """Flattens one experiment (config + fit results) into columns for a DataFrame.

    `config` is the experiment configuration dict; `fit_res` maps statistic
    names (e.g. 'train_loss', per-partition gaps, grad norms) to per-epoch
    sequences.
    """

    def __init__(self, config, fit_res):
        self.config = config
        self.fit_res = fit_res
        stat_to_default = {'step_every': 1}

        def get_from_cfg(stat):
            # Fall back to a default for options older configs did not record.
            return config[stat] if (stat in config) else stat_to_default[stat]

        self.interesting_from_config = {i: get_from_cfg(i) for i in ['model', 'dataset', 'seed', 'bs_train', 'step_every']}
        self.all_data = {}
        self.infer_experiment_names()
        self.max_len = 0

    def fix_model_name(self):
        pass

    def infer_num_partitions(self):
        pass

    def infer_experiment_names(self):
        """Derive a short algorithm name ('wp_ga_ws', 'gpipe', ...) from config flags."""
        wp = ('weight_prediction' in self.config)
        ga = ('gap_aware' in self.config)
        ws = (('weight_stashing' in self.config) and self.config['weight_stashing'])
        pipedream = (('work_scheduler' in self.config) and (self.config['work_scheduler'].lower() == 'pipedream'))
        sync = (('is_sync' in self.config) and self.config['is_sync'])
        ddp = (('ddp' in self.config) and self.config['ddp'])
        wp_name = ('wp' if wp else ('stale' if (not sync) else ''))
        ga_name = ('ga' if ga else '')
        ws_name = ('ws' if ws else '')
        pipedream_name = ('pipedream' if pipedream else '')
        sync_name = ('sync' if sync else '')
        ddp = ('ddp' if ddp else '')
        if ddp:
            sync_name = ''
        names = filter(None, [wp_name, ga_name, ws_name, pipedream_name, sync_name, ddp])
        alg = '_'.join(names)
        to_add = dict(wp=wp, ga=ga, ws=ws, alg=alg)
        self.interesting_from_config = {**self.interesting_from_config, **to_add}

    def merge(self, new_data):
        """Merge new columns into self.all_data.

        Only 'epoch' may already exist; the longer version wins.
        """
        # BUGFIX: was `to_delete = {}` (a dict) followed by `to_delete.add(i)`,
        # which raised AttributeError whenever a duplicate key appeared.
        to_delete = set()
        for (i, v) in new_data.items():
            if (i in self.all_data):
                to_delete.add(i)
                assert (i == 'epoch')
                if (len(v) > len(self.all_data[i])):
                    self.all_data[i] = v
                    self.max_len = len(v)
        d = {}
        for (i, v) in new_data.items():
            if (i not in to_delete):
                d[i] = v
        self.all_data = {**self.all_data, **d}

    def fix_gap_for_last_p(self, attr, data, length, gaps):
        # The last partition records no gap statistic; substitute zeros.
        # Returns None (implicitly) when this is not that special case.
        if (('gap' in attr) and (len(data) == 0)):
            l_index = f'p{(len(gaps) - 1)}'
            if (l_index in attr):
                return np.zeros(length)

    def infer_epoch_attrs(self):
        """Collect per-epoch series (losses/accuracies/gaps/grad-norms) into all_data."""
        attrs = []
        p = itertools.product(['train', 'test'], ['loss', 'acc'])
        traintestlossacc = [f'{traintest}_{lossacc}' for (traintest, lossacc) in p]
        gaps = [key for key in self.fit_res.keys() if ('gap' in key)]
        norms = [key for key in self.fit_res.keys() if ('grad_norm' in key)]
        attrs.extend(traintestlossacc)
        attrs.extend(gaps)
        attrs.extend(norms)
        attrs = [attr for attr in attrs if (attr in self.fit_res)]
        all_data = {}
        length = None
        for attr in attrs:
            # BUGFIX: was `isinstance(self.fit_res, NamedTuple)`, which raises
            # TypeError for typing.NamedTuple. Use mapping access for dicts and
            # attribute access otherwise (covers namedtuple-like results).
            if isinstance(self.fit_res, dict):
                data = self.fit_res[attr]
            else:
                data = getattr(self.fit_res, attr)
            if (not length):
                # First series fixes the epoch count for all other series.
                length = len(data)
                self.max_len = length
                all_data['epoch'] = np.arange(1, (len(data) + 1))
            elif (len(data) != length):
                new_data = self.fix_gap_for_last_p(attr, data, length, gaps)
                if (not (new_data is None)):
                    data = new_data
                else:
                    raise NotImplementedError(f'Supported only for same length: attr:{attr}, len(data):{len(data)} len:{length}')
            all_data[attr] = data
        self.merge(all_data)

    def replicate(self):
        """Broadcast scalar config values to max_len-length columns."""
        for (i, v) in self.interesting_from_config.items():
            self.all_data[i] = ([v] * self.max_len)

    def to_df(self):
        """Return all collected columns as a DataFrame."""
        return pd.DataFrame(self.all_data)
def process_file(f):
    """Load one experiment json and flatten it into a DataFrame."""
    cfg, results = load_experiment(f)
    stuff = InferStuff(cfg, results)
    stuff.infer_epoch_attrs()
    stuff.replicate()
    return stuff.to_df()
def all_results_to_csv(root_paths, csv_name):
    """Aggregate every experiment json under root_paths into one CSV file."""
    if isinstance(root_paths, str):
        root_paths = [root_paths]
    files = []
    for root_path in root_paths:
        files.extend(all_files(root_path))
    print(f'-I- There are {len(files)} json files in {root_paths}')
    print('-I- Creating....')
    frames = [process_file(f) for f in files]
    df = pd.concat(frames, sort=False)
    print(f'-I- Created df.shape: {df.shape}')
    print(f'-I- Writing csv: {csv_name}')
    df.to_csv(csv_name, index=False)
    print('-I- Done')
def print_uniques(csv, cols=None):
    """Print the unique values (and their counts) of `cols` in the CSV at `csv`.

    BUGFIX: the default for `cols` was a mutable list (shared-mutable-default
    pitfall); replaced with the None-sentinel idiom. Callers passing their own
    list are unaffected.
    """
    if cols is None:
        cols = ['alg', 'bs_train', 'model', 'dataset', 'seed', 'step_every']
    df = pd.read_csv(csv)
    var_to_uniques = {var: pd.unique(df[var]) for var in cols}
    var_to_len_uniques = {i: len(v) for (i, v) in var_to_uniques.items()}
    print(f'-I- Describing csv: {csv}')
    print(f'-I- Analyzed cols: {cols}')
    print('-I- length_uniques:')
    print(var_to_len_uniques)
    print('-I- uniques:')
    print(var_to_uniques)
def try_to_move_from_cfg_to_fit_res(config, fit_res, stats_names=STATS_SAVED_IN_ARGS):
    """Relocate selected statistics from the config dict into fit_res (in place)."""
    for name in stats_names:
        if (name not in config):
            continue
        fit_res[name] = config.pop(name)
def add_plot(fn, legened, fig=None, plot_fn=plot.plot_fit, try_to_move=True):
    """Load one experiment json and add its curves to `fig` via `plot_fn`."""
    (config, fit_res) = load_experiment(fn)
    if try_to_move:
        try_to_move_from_cfg_to_fit_res(config, fit_res)
    per_batch = ('loss_per_batch' in config['statistics'])
    (fig, ax) = plot_fn(fit_res, fig=fig, log_loss=False, legend=legened, loss_per_batch=per_batch)
    return (fig, ax)
def gen_plot(out_dir='results', out_base_name='current_status.png'):
    """Save the current matplotlib figure to out_dir/out_base_name.

    :param out_dir: output directory; created if it does not exist.
    :param out_base_name: file name for the rendered figure.
    """
    plt.plot()
    # exist_ok=True replaces the original's check-then-create pattern
    # (os.path.exists followed by os.makedirs), which is racy.
    os.makedirs(out_dir, exist_ok=True)
    out_file_name = os.path.join(out_dir, out_base_name)
    plt.savefig(out_file_name)
    print(f'-I- Generated: "{out_file_name}"')
def gen_plot_from_dict(fn_to_contour, plot_fn, out_base_name, out_dir='results'):
    """Overlay one curve per (file, legend) pair, then save the figure.

    :param fn_to_contour: mapping of experiment json path -> legend label.
    :param plot_fn: plotting backend handed to add_plot.
    :param out_base_name: base name of the output png (extension added).
    :param out_dir: directory the figure is written to.
    """
    fig = None
    for fname, contour in fn_to_contour.items():
        # Each call reuses the growing figure so all curves share axes.
        fig, _ax = add_plot(fname, contour, fig=fig, plot_fn=plot_fn)
    gen_plot(out_dir=out_dir, out_base_name=f'{out_base_name}.png')
def vit_b_16_c100():
    """Plot gradient norms for the ViT-B/16 cifar100 gpipe run."""
    fn_to_contour = {
        'results/vit/cifar100/fast_dcgn_global_no_nesterov_meanstd05_vit_base_patch16_384_in21k_imagenet_384c384_8p_bw12_gpipe_acyclic_cifar100_384_gpipe_bs_512_se_16_seed_42.json': 'global',
    }
    gen_plot_from_dict(fn_to_contour,
                       plot.plot_grad_norm,
                       'ViT_B_16_norm',
                       out_dir='results/figs')
def alg(fn):
    """Infer the training-algorithm name from a result-file name.

    Most specific variants are matched first ('<alg>_ws_ga', then
    '<alg>_ws_'), and 'aggmsnag' is tested before 'msnag' because the
    latter is a substring of the former.

    :param fn: file name (or path) of a results json.
    :return: the algorithm name, or 'BUG' if nothing matched.
    """
    # BUGFIX: the original wrote ws_alg = '{a}_ws' without the f-prefix,
    # so the weight-stashing variants were never matched in this loop.
    # Also reordered so 'aggmsnag' is checked before its substring 'msnag'.
    all_algs = ['aggmsnag', 'msnag', 'stale', 'pipedream']
    for a in all_algs:
        ga_alg = f'{a}_ws_ga'
        if ga_alg in fn:
            return ga_alg
        ws_alg = f'{a}_ws'
        if (ws_alg + '_') in fn:
            return ws_alg
    # Fallback chain, preserved from the original.
    if 'gpipe' in fn:
        return 'gpipe'
    if 'aggmsnag' in fn:
        return 'aggmsnag'
    if 'msnag' in fn:
        return 'msnag'
    if 'stale_ws' in fn:
        return 'stale_ws'
    if 'stale' in fn:
        return 'stale'
    return 'BUG'
def read_desc_df(path='desc.csv'):
    """Read a groupby-describe csv back into a dataframe.

    Expects the layout written by the write_*_desc_df helpers: a
    3-level row index (alg, batch, agg) and a 2-level column header
    (metric, statistic).
    """
    return pd.read_csv(path,
                       index_col=[0, 1, 2],
                       header=[0, 1],
                       skipinitialspace=True)
def filter_desc_df_squad(desc):
    """Keep only the f1/em/total_time summary columns of a describe() df."""
    stats = ['mean', 'max', 'min', 'std']
    wanted = [(metric, stat)
              for metric in ['f1', 'em', 'total_time']
              for stat in stats]
    return desc[wanted]
def filter_desc_df_lm(desc):
    """Keep only the ppl/total_time summary columns of a describe() df."""
    stats = ['mean', 'max', 'min', 'std']
    wanted = [(metric, stat)
              for metric in ['ppl', 'total_time']
              for stat in stats]
    return desc[wanted]
def filter_desc_df_cv(desc):
    """Keep only the acc/total_time summary columns of a describe() df."""
    stats = ['mean', 'max', 'min', 'std']
    wanted = [(metric, stat)
              for metric in ['acc', 'total_time']
              for stat in stats]
    return desc[wanted]
def write_squad_desc_df():
    """Summarize squad result jsons in the cwd into desc.csv and df.csv.

    Reads every *.json in the current directory; 'desc.csv' holds the
    per-(alg, batch, agg) describe() of f1/em/total_time, 'df.csv' the
    raw per-run records.
    """
    records = []
    for f in glob.glob('*.json'):
        with open(f, 'rb') as fd:
            r = json.load(fd)
        cfg = r['config']
        agg = cfg['step_every']
        squad = r['results']['squad_results']['2']
        records.append({
            'name': f,
            'alg': alg(f),
            'seed': cfg['seed'],
            'agg': agg,
            'batch': agg * cfg['bs_train'],
            'total_time': sum(cfg['train_epochs_times']),
            'f1': squad['f1'],
            'em': squad['exact'],
        })
    df = pd.DataFrame.from_records(records)
    print(df)
    desc = df.groupby(['alg', 'batch', 'agg']).describe()
    desc = desc[['f1', 'em', 'total_time']]
    print(desc)
    desc.to_csv('desc.csv', index=True)
    df.to_csv('df.csv', index=False)
def write_lm_desc_df():
    """Summarize language-model result jsons in the cwd into desc.csv and df.csv.

    Reads every *.json in the current directory; 'desc.csv' holds the
    per-(alg, batch, agg) describe() of ppl/total_time, 'df.csv' the
    raw per-run records.
    """
    records = []
    for f in glob.glob('*.json'):
        with open(f, 'rb') as fd:
            r = json.load(fd)
        cfg = r['config']
        agg = cfg['step_every']
        records.append({
            'name': f,
            'alg': alg(f),
            'seed': cfg['seed'],
            'agg': agg,
            'batch': agg * cfg['bs_train'],
            'total_time': cfg['exp_total_time'],
            # final-epoch perplexity
            'ppl': r['results']['test_ppl'][-1],
        })
    df = pd.DataFrame.from_records(records)
    print(df)
    desc = df.groupby(['alg', 'batch', 'agg']).describe()
    desc = desc[['ppl', 'total_time']]
    print(desc)
    desc.to_csv('desc.csv', index=True)
    df.to_csv('df.csv', index=False)
def write_cv_desc_df():
    """Summarize vision result jsons in the cwd into desc.csv and df.csv.

    Reads every *.json in the current directory; 'desc.csv' holds the
    per-(alg, batch, agg) describe() of acc/total_time, 'df.csv' the
    raw per-run records.
    """
    records = []
    for f in glob.glob('*.json'):
        with open(f, 'rb') as fd:
            r = json.load(fd)
        cfg = r['config']
        agg = cfg['step_every']
        records.append({
            'name': f,
            'alg': alg(f),
            'seed': cfg['seed'],
            'agg': agg,
            'batch': agg * cfg['bs_train'],
            'total_time': cfg['exp_total_time'],
            # final-epoch test accuracy
            'acc': r['results']['test_acc'][-1],
        })
    df = pd.DataFrame.from_records(records)
    print(df)
    desc = df.groupby(['alg', 'batch', 'agg']).describe()
    desc = desc[['acc', 'total_time']]
    print(desc)
    desc.to_csv('desc.csv', index=True)
    df.to_csv('df.csv', index=False)
def plot_loss(fit_res: Union[NamedTuple, dict], fig=None, log_loss=False,
              legend=None, loss_per_batch=False, step_every=1,
              original_step_every=1):
    """Plot train and test (valid) loss curves.

    :param fit_res: results container — a dict or a namedtuple instance.
    :param fig: figure from a previous call to add curves to, or None.
        Lines already labeled `legend` are removed first.
    :param log_loss: plot losses on a log scale.
    :param legend: label for this result in the legend.
    :param loss_per_batch: treat data as per-batch records and average
        every `step_every` of them into one point.
    :param step_every: averaging window for per-batch data.
    :param original_step_every: scale applied to the averaged train loss.
    :return: (fig, axes)
    """
    if fig is None:
        fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10))
        axes = axes.reshape(-1)
    else:
        axes = fig.axes
    # Drop curves previously drawn under the same legend label.
    for ax in axes:
        for line in ax.lines:
            if line.get_label() == legend:
                line.remove()
    p = itertools.product(['train', 'test'], ['loss'])
    for idx, (traintest, lossacc) in enumerate(p):
        ax = axes[idx]
        attr = f'{traintest}_{lossacc}'
        # BUGFIX: the original used isinstance(fit_res, NamedTuple), which
        # is always False on py<3.9 and a TypeError on py>=3.9; duck-type
        # namedtuple instances via their _fields attribute instead.
        if isinstance(fit_res, tuple) and hasattr(fit_res, '_fields'):
            data = getattr(fit_res, attr)
        else:
            data = fit_res[attr]
        data = np.asarray(data)
        if loss_per_batch:
            # Average every step_every consecutive records; drop the
            # incomplete tail so the reshape is exact.
            usable = (len(data) // step_every) * step_every
            data = np.mean(data[:usable].reshape(-1, step_every), axis=1)
            if traintest == 'train':
                data *= original_step_every
        ax.plot(np.arange(1, len(data) + 1), data, label=legend)
        # Display convention of this file: 'test' panels are titled 'valid'.
        ax.set_title(attr.replace('test', 'valid'))
        loss_name = 'Iteration' if loss_per_batch else 'Epoch'
        if lossacc == 'loss':
            ax.set_xlabel(f'{loss_name} #')
            ax.set_ylabel('Loss')
            if log_loss:
                ax.set_yscale('log')
                ax.set_ylabel('Loss (log)')
        if legend:
            ax.legend()
    return fig, axes
def plot_fit(fit_res: Union[NamedTuple, dict], fig=None, log_loss=False,
             legend=None, loss_per_batch=False):
    """Plot a fit result as four panels: train/test loss and accuracy.

    :param fit_res: results container — a dict or a namedtuple instance.
    :param fig: figure from a previous call to add curves to, or None.
        Lines already labeled `legend` are removed first.
    :param log_loss: plot losses on a log scale.
    :param legend: label for this fit result in the legend.
    :param loss_per_batch: affects only the loss x-axis label
        ('Iteration' instead of 'Epoch').
    :return: (fig, axes)
    """
    if fig is None:
        fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16, 10),
                                 sharex='col', sharey=False)
        axes = axes.reshape(-1)
    else:
        axes = fig.axes
    # Drop curves previously drawn under the same legend label.
    for ax in axes:
        for line in ax.lines:
            if line.get_label() == legend:
                line.remove()
    p = itertools.product(['train', 'test'], ['loss', 'acc'])
    for idx, (traintest, lossacc) in enumerate(p):
        ax = axes[idx]
        attr = f'{traintest}_{lossacc}'
        # BUGFIX: isinstance(fit_res, NamedTuple) is always False on
        # py<3.9 and a TypeError on py>=3.9; duck-type namedtuples
        # via their _fields attribute instead.
        if isinstance(fit_res, tuple) and hasattr(fit_res, '_fields'):
            data = getattr(fit_res, attr)
        else:
            data = fit_res[attr]
        ax.plot(np.arange(1, len(data) + 1), data, label=legend)
        ax.set_title(attr)
        loss_name = 'Iteration' if loss_per_batch else 'Epoch'
        if lossacc == 'loss':
            ax.set_xlabel(f'{loss_name} #')
            ax.set_ylabel('Loss')
            if log_loss:
                ax.set_yscale('log')
                ax.set_ylabel('Loss (log)')
        else:
            ax.set_xlabel('Epoch #')
            ax.set_ylabel('Accuracy (%)')
        if legend:
            ax.legend()
    return fig, axes
def plot_grad_norm(fit_res: Union[NamedTuple, dict], fig=None, legend=None, **kw):
    """Plot every per-partition local gradient-norm series plus accuracies.

    One panel per 'local_grad_norm' key in fit_res (sorted by name),
    followed by train/test accuracy panels.

    :param fit_res: results container; expected to be a dict here
        (the keys() scan assumes a mapping).
    :param fig: figure from a previous call, or None for a new grid.
    :param legend: label for these curves in the legend.
    :return: (fig, axes)
    """
    local_norm_key = 'local_grad_norm'
    total_norms = sum(local_norm_key in key for key in fit_res.keys())
    # The grid below assumes an even number of norm series (2 columns).
    assert (total_norms % 2) == 0
    if fig is None:
        fig, axes = plt.subplots(nrows=1 + (total_norms // 2), ncols=2,
                                 figsize=(16, 3 * total_norms),
                                 sharex=False, sharey=False)
        axes = axes.reshape(-1)
    else:
        axes = fig.axes
    all_norms = sorted(key for key in fit_res.keys() if local_norm_key in key)
    p = all_norms + ['train_acc', 'test_acc']
    for idx, attr in enumerate(p):
        ax = axes[idx]
        # BUGFIX: isinstance(fit_res, NamedTuple) is always False on
        # py<3.9 and a TypeError on py>=3.9; duck-type namedtuples
        # via their _fields attribute instead.
        if isinstance(fit_res, tuple) and hasattr(fit_res, '_fields'):
            data = getattr(fit_res, attr)
        else:
            data = fit_res[attr]
        ax.plot(np.arange(1, len(data) + 1), data, label=legend)
        ax.set_title(attr)
        if 'acc' in attr:
            ax.set_ylabel('Accuracy (%)')
            ax.set_xlabel('Epoch #')
        else:
            ax.set_ylabel('Norm')
            ax.set_xlabel('Step #')
        if legend:
            ax.legend()
    return fig, axes
def plot_gap(fit_res: Union[NamedTuple, dict], fig=None, legend=None, **kw):
    """Plot every staleness-gap series in fit_res plus accuracies.

    One panel per key containing 'gap', followed by train/test accuracy
    panels.

    :param fit_res: results container; expected to be a dict here
        (the keys() scan assumes a mapping).
    :param fig: figure from a previous call, or None for a new grid.
    :param legend: label for these curves in the legend.
    :return: (fig, axes)
    """
    total = sum('gap' in key for key in fit_res.keys())
    # The grid below assumes an even number of gap series (2 columns).
    assert (total % 2) == 0
    if fig is None:
        fig, axes = plt.subplots(nrows=1 + (total // 2), ncols=2,
                                 figsize=(16, 10), sharex='col', sharey=False)
        axes = axes.reshape(-1)
    else:
        axes = fig.axes
    gap_keys = [key for key in fit_res.keys() if 'gap' in key]
    # BUGFIX: removed the original dead "padding" loop — its condition
    # len(key) == 0 can never hold for keys containing 'gap', and the
    # body (str += list) would raise TypeError if it ever ran.
    p = gap_keys + ['train_acc', 'test_acc']
    for idx, attr in enumerate(p):
        ax = axes[idx]
        # BUGFIX: isinstance(fit_res, NamedTuple) is always False on
        # py<3.9 and a TypeError on py>=3.9; duck-type namedtuples
        # via their _fields attribute instead.
        if isinstance(fit_res, tuple) and hasattr(fit_res, '_fields'):
            data = getattr(fit_res, attr)
        else:
            data = fit_res[attr]
        ax.plot(np.arange(1, len(data) + 1), data, label=legend)
        ax.set_title(attr)
        ax.set_xlabel('Epoch #')
        if 'acc' in attr:
            ax.set_ylabel('Accuracy (%)')
        else:
            ax.set_ylabel('Gap [sum of L2 norms]')
        if legend:
            ax.legend()
    return fig, axes
def plot_tta(fit_res: Union[NamedTuple, dict], fig=None, log_loss=False,
             legend=None, loss_per_batch=False):
    """Plot train/test loss and accuracy versus cumulative training time.

    The x-axis is the running sum of 'total_epoch_times' converted to
    hours.

    :param fit_res: results container; must be a dict — the namedtuple
        path is explicitly unimplemented.
    :param fig: figure from a previous call to add curves to, or None.
        Lines already labeled `legend` are removed first.
    :param log_loss: plot losses on a log scale.
    :param legend: label for this result in the legend.
    :param loss_per_batch: unsupported here; raises NotImplementedError.
    :return: (fig, axes)
    """
    time_units = 'hours'
    time_div_factor = {'seconds': 1, 'minutes': 60, 'hours': 3600}.get(time_units)
    if loss_per_batch:
        raise NotImplementedError()
    if fig is None:
        fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16, 10),
                                 sharex='col', sharey=False)
        axes = axes.reshape(-1)
    else:
        axes = fig.axes
    # Drop curves previously drawn under the same legend label.
    for ax in axes:
        for line in ax.lines:
            if line.get_label() == legend:
                line.remove()
    p = itertools.product(['train', 'test'], ['loss', 'acc'])
    for idx, (traintest, lossacc) in enumerate(p):
        ax = axes[idx]
        attr = f'{traintest}_{lossacc}'
        time_attr = 'total_epoch_times'
        # BUGFIX: isinstance(fit_res, NamedTuple) is always False on
        # py<3.9 and a TypeError on py>=3.9; duck-type namedtuples via
        # _fields. As in the original, the namedtuple path is
        # unimplemented (no epoch-time vector available).
        if isinstance(fit_res, tuple) and hasattr(fit_res, '_fields'):
            raise NotImplementedError()
        data = fit_res[attr]
        times = np.cumsum(np.array(fit_res[time_attr]) / time_div_factor)
        ax.plot(times, data, label=legend)
        ax.set_title(attr)
        ax.set_xlabel(f'Time ({time_units})')
        if lossacc == 'loss':
            ax.set_ylabel('Loss')
            if log_loss:
                ax.set_yscale('log')
                ax.set_ylabel('Loss (log)')
        else:
            ax.set_ylabel('Accuracy (%)')
        if legend:
            ax.legend()
    return fig, axes
def p1(graph='test_acc'):
    """Line-plot `graph` over epochs per algorithm (cifar100, 4 partitions)."""
    out_file_name = os.path.join('.', f'{graph}.png')
    df = pd.read_csv('4partitions.csv').query("dataset == 'cifar100'")
    ax = sns.lineplot(x='epoch', y=graph, hue='alg', data=df)
    ax.set_title(graph)
    # Sanity check: all rows must belong to a single model.
    models = pd.unique(df.model)
    assert len(models) == 1
    model = models[0]
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def p1_fit_plots():
    """Emit the four standard fit curves, one figure per metric."""
    for metric in ('test_acc', 'train_acc', 'train_loss', 'test_loss'):
        plt.figure()
        p1(metric)
def p2():
    """Bar-plot final-epoch cifar100 test accuracy per algorithm."""
    out_file_name = os.path.join('.', 'output.png')
    df = (pd.read_csv('4partitions.csv')
          .query("dataset == 'cifar100'")
          .query('epoch == 200'))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # Sanity check: all rows must belong to a single model.
    models = pd.unique(df.model)
    assert len(models) == 1
    ax.set_ylim(80, 83)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def p2_2partitions(model='wrn_28x10_c100_dr03_p2'):
    """Bar-plot epoch-200 test accuracy for one 2-partition model."""
    out_file_name = os.path.join('.', f'{model}_output.png')
    # NOTE: @model in the query string resolves the `model` parameter.
    df = (pd.read_csv('2partitions.csv')
          .query("dataset == 'cifar100' and model == @model")
          .query('epoch == 200'))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # Sanity check: the filter must have kept exactly one model.
    models = pd.unique(df.model)
    assert len(models) == 1
    ax.set_ylim(80, 83)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def p2_2partitions_16x4(model='wrn_16x4_c100_p2'):
    """Bar-plot epoch-200 test accuracy for the 16x4 2-partition model."""
    out_file_name = os.path.join('.', f'{model}_output.png')
    # NOTE: @model in the query string resolves the `model` parameter.
    df = (pd.read_csv('2partitions.csv')
          .query("dataset == 'cifar100' and model == @model")
          .query('epoch == 200'))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # Sanity check: the filter must have kept exactly one model.
    models = pd.unique(df.model)
    assert len(models) == 1
    ax.set_ylim(75, 78)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')
def p2_2partitions_all_models():
    """Render the 2-partition bar plot for every known model."""
    for m in ('wrn_16x4_c100_p2', 'wrn_28x10_c100_dr03_p2'):
        plt.figure()
        p2_2partitions(m)
def p3():
    """Bar-plot final-epoch cifar10 test accuracy per algorithm."""
    out_file_name = os.path.join('.', 'output2.png')
    df = (pd.read_csv('4partitions.csv')
          .query("dataset == 'cifar10'")
          .query('epoch == 200'))
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # Sanity check: all rows must belong to a single model.
    models = pd.unique(df.model)
    assert len(models) == 1
    ax.set_ylim(94, 96)
    ax.set_title(models[0])
    ax.get_figure().savefig(out_file_name)
    print(f'saving file to {out_file_name}')